From e0c373ec12c7c043022fb9779da97f095b7f5bf8 Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Wed, 3 Jul 2019 20:09:56 +0100 Subject: [PATCH 001/324] notes 1.5.2: fix typo (#2971) Signed-off-by: Miek Gieben --- notes/coredns-1.5.2.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/notes/coredns-1.5.2.md b/notes/coredns-1.5.2.md index d6224a0b578..7e39588500e 100644 --- a/notes/coredns-1.5.2.md +++ b/notes/coredns-1.5.2.md @@ -10,8 +10,9 @@ author = "coredns" The CoreDNS team has released [CoreDNS-1.5.2](https://github.com/coredns/coredns/releases/tag/v1.5.2). -Small bugfixes and a change to Caddy's important path. - +Small bugfixes and a change to Caddy's import path (mholt/caddy -> caddyserver/caddy). Doing +a release helps plugins deal with the change better. + # Plugins * For all plugins that use the `upstream` directive it use removed from the documentation; it's still accepted From f5fe98395e2777907cdbfee08d37f12b4c1931c7 Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Wed, 3 Jul 2019 20:12:51 +0100 Subject: [PATCH 002/324] Remove -cpu flag (#2793) The -cpu flag is a weird one (and copied originally from Caddy), it basically sets GOMAXPROCS which can be *easily* done by just setting that environment variable. Also with systemd and containerized env you set this externally *anyway*, so there is little use to do this again in the binary. Also the option's help was confusing (i.e. percentage of what?). Remove the option and supporting files. 
Signed-off-by: Miek Gieben --- coredns.1.md | 3 --- coremain/run.go | 44 -------------------------------------------- coremain/run_test.go | 44 -------------------------------------------- 3 files changed, 91 deletions(-) delete mode 100644 coremain/run_test.go diff --git a/coredns.1.md b/coredns.1.md index 6fe61e38887..4784e969727 100644 --- a/coredns.1.md +++ b/coredns.1.md @@ -24,9 +24,6 @@ Available options: : specify Corefile to load, if not given CoreDNS will look for a `Corefile` in the current directory. -**-cpu** **CAP** -: specify maximum CPU capacity in percent. - **-dns.port** **PORT** : override default port (53) to listen on. diff --git a/coremain/run.go b/coremain/run.go index e6f51295fdf..5f3b610bd90 100644 --- a/coremain/run.go +++ b/coremain/run.go @@ -2,14 +2,12 @@ package coremain import ( - "errors" "flag" "fmt" "io/ioutil" "log" "os" "runtime" - "strconv" "strings" "github.com/coredns/coredns/core/dnsserver" @@ -24,7 +22,6 @@ func init() { setVersion() flag.StringVar(&conf, "conf", "", "Corefile to load (default \""+caddy.DefaultConfigFile+"\")") - flag.StringVar(&cpu, "cpu", "100%", "CPU cap") flag.BoolVar(&plugins, "plugins", false, "List installed plugins") flag.StringVar(&caddy.PidFile, "pidfile", "", "Path to write pid file") flag.BoolVar(&version, "version", false, "Show version") @@ -73,11 +70,6 @@ func Run() { os.Exit(0) } - // Set CPU cap - if err := setCPU(cpu); err != nil { - mustLogFatal(err) - } - // Get Corefile input corefile, err := caddy.LoadCaddyfile(serverType) if err != nil { @@ -194,45 +186,9 @@ func setVersion() { } } -// setCPU parses string cpu and sets GOMAXPROCS -// according to its value. It accepts either -// a number (e.g. 3) or a percent (e.g. 50%). 
-func setCPU(cpu string) error { - var numCPU int - - availCPU := runtime.NumCPU() - - if strings.HasSuffix(cpu, "%") { - // Percent - var percent float32 - pctStr := cpu[:len(cpu)-1] - pctInt, err := strconv.Atoi(pctStr) - if err != nil || pctInt < 1 || pctInt > 100 { - return errors.New("invalid CPU value: percentage must be between 1-100") - } - percent = float32(pctInt) / 100 - numCPU = int(float32(availCPU) * percent) - } else { - // Number - num, err := strconv.Atoi(cpu) - if err != nil || num < 1 { - return errors.New("invalid CPU value: provide a number or percent greater than 0") - } - numCPU = num - } - - if numCPU > availCPU { - numCPU = availCPU - } - - runtime.GOMAXPROCS(numCPU) - return nil -} - // Flags that control program flow or startup var ( conf string - cpu string logfile bool version bool plugins bool diff --git a/coremain/run_test.go b/coremain/run_test.go deleted file mode 100644 index da01637d8d4..00000000000 --- a/coremain/run_test.go +++ /dev/null @@ -1,44 +0,0 @@ -package coremain - -import ( - "runtime" - "testing" -) - -func TestSetCPU(t *testing.T) { - currentCPU := runtime.GOMAXPROCS(-1) - maxCPU := runtime.NumCPU() - halfCPU := int(0.5 * float32(maxCPU)) - if halfCPU < 1 { - halfCPU = 1 - } - for i, test := range []struct { - input string - output int - shouldErr bool - }{ - {"1", 1, false}, - {"-1", currentCPU, true}, - {"0", currentCPU, true}, - {"100%", maxCPU, false}, - {"50%", halfCPU, false}, - {"110%", currentCPU, true}, - {"-10%", currentCPU, true}, - {"invalid input", currentCPU, true}, - {"invalid input%", currentCPU, true}, - {"9999", maxCPU, false}, // over available CPU - } { - err := setCPU(test.input) - if test.shouldErr && err == nil { - t.Errorf("Test %d: Expected error, but there wasn't any", i) - } - if !test.shouldErr && err != nil { - t.Errorf("Test %d: Expected no error, but there was one: %v", i, err) - } - if actual, expected := runtime.GOMAXPROCS(-1), test.output; actual != expected { - t.Errorf("Test %d: 
GOMAXPROCS was %d but expected %d", i, actual, expected) - } - // teardown - runtime.GOMAXPROCS(currentCPU) - } -} From 2bd77d0823309c282197256cf9b3d8274578811d Mon Sep 17 00:00:00 2001 From: Anshul Sharma Date: Thu, 4 Jul 2019 00:44:31 +0530 Subject: [PATCH 003/324] Fix multiple credentials in route53 (#2859) --- plugin/route53/setup.go | 59 ++++++++++++++++++++--------------------- 1 file changed, 29 insertions(+), 30 deletions(-) diff --git a/plugin/route53/setup.go b/plugin/route53/setup.go index adc2b3e006f..6eb16586315 100644 --- a/plugin/route53/setup.go +++ b/plugin/route53/setup.go @@ -35,21 +35,22 @@ func init() { } func setup(c *caddy.Controller, f func(*credentials.Credentials) route53iface.Route53API) error { - keyPairs := map[string]struct{}{} - keys := map[string][]string{} + for c.Next() { + keyPairs := map[string]struct{}{} + keys := map[string][]string{} - // Route53 plugin attempts to find AWS credentials by using ChainCredentials. - // And the order of that provider chain is as follows: - // Static AWS keys -> Environment Variables -> Credentials file -> IAM role - // With that said, even though a user doesn't define any credentials in - // Corefile, we should still attempt to read the default credentials file, - // ~/.aws/credentials with the default profile. - sharedProvider := &credentials.SharedCredentialsProvider{} - var providers []credentials.Provider - var fall fall.F + // Route53 plugin attempts to find AWS credentials by using ChainCredentials. + // And the order of that provider chain is as follows: + // Static AWS keys -> Environment Variables -> Credentials file -> IAM role + // With that said, even though a user doesn't define any credentials in + // Corefile, we should still attempt to read the default credentials file, + // ~/.aws/credentials with the default profile. 
+ sharedProvider := &credentials.SharedCredentialsProvider{} + var providers []credentials.Provider + var fall fall.F + + up := upstream.New() - up := upstream.New() - for c.Next() { args := c.RemainingArgs() for i := 0; i < len(args); i++ { @@ -99,23 +100,21 @@ func setup(c *caddy.Controller, f func(*credentials.Credentials) route53iface.Ro return c.Errf("unknown property '%s'", c.Val()) } } + providers = append(providers, &credentials.EnvProvider{}, sharedProvider) + client := f(credentials.NewChainCredentials(providers)) + ctx := context.Background() + h, err := New(ctx, client, keys, up) + if err != nil { + return c.Errf("failed to create Route53 plugin: %v", err) + } + h.Fall = fall + if err := h.Run(ctx); err != nil { + return c.Errf("failed to initialize Route53 plugin: %v", err) + } + dnsserver.GetConfig(c).AddPlugin(func(next plugin.Handler) plugin.Handler { + h.Next = next + return h + }) } - providers = append(providers, &credentials.EnvProvider{}, sharedProvider) - - client := f(credentials.NewChainCredentials(providers)) - ctx := context.Background() - h, err := New(ctx, client, keys, up) - if err != nil { - return c.Errf("failed to create Route53 plugin: %v", err) - } - h.Fall = fall - if err := h.Run(ctx); err != nil { - return c.Errf("failed to initialize Route53 plugin: %v", err) - } - dnsserver.GetConfig(c).AddPlugin(func(next plugin.Handler) plugin.Handler { - h.Next = next - return h - }) - return nil } From f9fb9db1715e074c7549548e72f1d29d5ddc268f Mon Sep 17 00:00:00 2001 From: Anshul Sharma Date: Thu, 4 Jul 2019 01:09:12 +0530 Subject: [PATCH 004/324] ISSUE-2911 (#2923) - Remove resyncperiod from Kubernetes plugin --- man/coredns-kubernetes.7 | 5 +- plugin/kubernetes/README.md | 4 +- plugin/kubernetes/controller.go | 8 +- plugin/kubernetes/object/informer.go | 8 +- plugin/kubernetes/setup.go | 14 +--- plugin/kubernetes/setup_test.go | 117 ++------------------------- 6 files changed, 17 insertions(+), 139 deletions(-) diff --git 
a/man/coredns-kubernetes.7 b/man/coredns-kubernetes.7 index 6d7cfc555b8..c430a6c128e 100644 --- a/man/coredns-kubernetes.7 +++ b/man/coredns-kubernetes.7 @@ -47,7 +47,6 @@ all the zones the plugin should be authoritative for. .nf kubernetes [ZONES...] { - resyncperiod DURATION endpoint URL tls CERT KEY CACERT kubeconfig KUBECONFIG CONTEXT @@ -65,9 +64,7 @@ kubernetes [ZONES...] { .fi .RE -.IP \(bu 4 -\fB\fCresyncperiod\fR specifies the Kubernetes data API \fBDURATION\fP period. By -default resync is disabled (DURATION is zero). + .IP \(bu 4 \fB\fCendpoint\fR specifies the \fBURL\fP for a remote k8s API endpoint. If omitted, it will connect to k8s in-cluster using the cluster service account. diff --git a/plugin/kubernetes/README.md b/plugin/kubernetes/README.md index 50a31801f33..15af2565c91 100644 --- a/plugin/kubernetes/README.md +++ b/plugin/kubernetes/README.md @@ -31,7 +31,6 @@ all the zones the plugin should be authoritative for. ``` kubernetes [ZONES...] { - resyncperiod DURATION endpoint URL tls CERT KEY CACERT kubeconfig KUBECONFIG CONTEXT @@ -47,8 +46,7 @@ kubernetes [ZONES...] { } ``` -* `resyncperiod` specifies the Kubernetes data API **DURATION** period. By - default resync is disabled (DURATION is zero). + * `endpoint` specifies the **URL** for a remote k8s API endpoint. If omitted, it will connect to k8s in-cluster using the cluster service account. * `tls` **CERT** **KEY** **CACERT** are the TLS cert, key and the CA cert file names for remote k8s connection. diff --git a/plugin/kubernetes/controller.go b/plugin/kubernetes/controller.go index 63473558499..f5a3e7ffdcc 100644 --- a/plugin/kubernetes/controller.go +++ b/plugin/kubernetes/controller.go @@ -80,7 +80,6 @@ type dnsControl struct { type dnsControlOpts struct { initPodCache bool initEndpointsCache bool - resyncPeriod time.Duration ignoreEmptyService bool // Label handling. 
@@ -110,7 +109,6 @@ func newdnsController(kubeClient kubernetes.Interface, opts dnsControlOpts) *dns WatchFunc: serviceWatchFunc(dns.client, api.NamespaceAll, dns.selector), }, &api.Service{}, - opts.resyncPeriod, cache.ResourceEventHandlerFuncs{AddFunc: dns.Add, UpdateFunc: dns.Update, DeleteFunc: dns.Delete}, cache.Indexers{svcNameNamespaceIndex: svcNameNamespaceIndexFunc, svcIPIndex: svcIPIndexFunc}, object.ToService, @@ -123,7 +121,6 @@ func newdnsController(kubeClient kubernetes.Interface, opts dnsControlOpts) *dns WatchFunc: podWatchFunc(dns.client, api.NamespaceAll, dns.selector), }, &api.Pod{}, - opts.resyncPeriod, cache.ResourceEventHandlerFuncs{AddFunc: dns.Add, UpdateFunc: dns.Update, DeleteFunc: dns.Delete}, cache.Indexers{podIPIndex: podIPIndexFunc}, object.ToPod, @@ -137,7 +134,6 @@ func newdnsController(kubeClient kubernetes.Interface, opts dnsControlOpts) *dns WatchFunc: endpointsWatchFunc(dns.client, api.NamespaceAll, dns.selector), }, &api.Endpoints{}, - opts.resyncPeriod, cache.ResourceEventHandlerFuncs{AddFunc: dns.Add, UpdateFunc: dns.Update, DeleteFunc: dns.Delete}, cache.Indexers{epNameNamespaceIndex: epNameNamespaceIndexFunc, epIPIndex: epIPIndexFunc}, object.ToEndpoints) @@ -149,7 +145,7 @@ func newdnsController(kubeClient kubernetes.Interface, opts dnsControlOpts) *dns WatchFunc: namespaceWatchFunc(dns.client, dns.namespaceSelector), }, &api.Namespace{}, - opts.resyncPeriod, + defaultResyncPeriod, cache.ResourceEventHandlerFuncs{}) return &dns @@ -516,3 +512,5 @@ func (dns *dnsControl) updateModifed() { } var errObj = errors.New("obj was not of the correct type") + +const defaultResyncPeriod = 0 diff --git a/plugin/kubernetes/object/informer.go b/plugin/kubernetes/object/informer.go index 9336571dc10..919fce12d24 100644 --- a/plugin/kubernetes/object/informer.go +++ b/plugin/kubernetes/object/informer.go @@ -1,14 +1,12 @@ package object import ( - "time" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/tools/cache" ) // 
NewIndexerInformer is a copy of the cache.NewIndexInformer function, but allows Process to have a conversion function (ToFunc). -func NewIndexerInformer(lw cache.ListerWatcher, objType runtime.Object, resyncPeriod time.Duration, h cache.ResourceEventHandler, indexers cache.Indexers, convert ToFunc) (cache.Indexer, cache.Controller) { +func NewIndexerInformer(lw cache.ListerWatcher, objType runtime.Object, h cache.ResourceEventHandler, indexers cache.Indexers, convert ToFunc) (cache.Indexer, cache.Controller) { clientState := cache.NewIndexer(cache.DeletionHandlingMetaNamespaceKeyFunc, indexers) fifo := cache.NewDeltaFIFO(cache.MetaNamespaceKeyFunc, clientState) @@ -17,7 +15,7 @@ func NewIndexerInformer(lw cache.ListerWatcher, objType runtime.Object, resyncPe Queue: fifo, ListerWatcher: lw, ObjectType: objType, - FullResyncPeriod: resyncPeriod, + FullResyncPeriod: defaultResyncPeriod, RetryOnError: false, Process: func(obj interface{}) error { for _, d := range obj.(cache.Deltas) { @@ -49,3 +47,5 @@ func NewIndexerInformer(lw cache.ListerWatcher, objType runtime.Object, resyncPe } return clientState, cache.New(cfg) } + +const defaultResyncPeriod = 0 diff --git a/plugin/kubernetes/setup.go b/plugin/kubernetes/setup.go index 028f4ae772c..d97bc9139cb 100644 --- a/plugin/kubernetes/setup.go +++ b/plugin/kubernetes/setup.go @@ -131,7 +131,6 @@ func ParseStanza(c *caddy.Controller) (*Kubernetes, error) { opts := dnsControlOpts{ initEndpointsCache: true, ignoreEmptyService: false, - resyncPeriod: defaultResyncPeriod, } k8s.opts = opts @@ -214,16 +213,7 @@ func ParseStanza(c *caddy.Controller) (*Kubernetes, error) { } return nil, c.ArgErr() case "resyncperiod": - args := c.RemainingArgs() - if len(args) > 0 { - rp, err := time.ParseDuration(args[0]) - if err != nil { - return nil, fmt.Errorf("unable to parse resync duration value: '%v': %v", args[0], err) - } - k8s.opts.resyncPeriod = rp - continue - } - return nil, c.ArgErr() + continue case "labels": args := 
c.RemainingArgs() if len(args) > 0 { @@ -322,5 +312,3 @@ func searchFromResolvConf() []string { plugin.Zones(rc.Search).Normalize() return rc.Search } - -const defaultResyncPeriod = 0 diff --git a/plugin/kubernetes/setup_test.go b/plugin/kubernetes/setup_test.go index fc09f04766f..634401d9d5c 100644 --- a/plugin/kubernetes/setup_test.go +++ b/plugin/kubernetes/setup_test.go @@ -3,7 +3,6 @@ package kubernetes import ( "strings" "testing" - "time" "github.com/coredns/coredns/plugin/pkg/fall" @@ -13,14 +12,13 @@ import ( func TestKubernetesParse(t *testing.T) { tests := []struct { - input string // Corefile data as string - shouldErr bool // true if test case is expected to produce an error. - expectedErrContent string // substring from the expected error. Empty for positive cases. - expectedZoneCount int // expected count of defined zones. - expectedNSCount int // expected count of namespaces. - expectedResyncPeriod time.Duration // expected resync period value - expectedLabelSelector string // expected label selector value - expectedNamespaceLabelSelector string // expected namespace label selector value + input string // Corefile data as string + shouldErr bool // true if test case is expected to produce an error. + expectedErrContent string // substring from the expected error. Empty for positive cases. + expectedZoneCount int // expected count of defined zones. + expectedNSCount int // expected count of namespaces. 
+ expectedLabelSelector string // expected label selector value + expectedNamespaceLabelSelector string // expected namespace label selector value expectedPodMode string expectedFallthrough fall.F }{ @@ -31,7 +29,6 @@ func TestKubernetesParse(t *testing.T) { "", 1, 0, - defaultResyncPeriod, "", "", podModeDisabled, @@ -43,7 +40,6 @@ func TestKubernetesParse(t *testing.T) { "", 2, 0, - defaultResyncPeriod, "", "", podModeDisabled, @@ -56,7 +52,6 @@ func TestKubernetesParse(t *testing.T) { "", 1, 0, - defaultResyncPeriod, "", "", podModeDisabled, @@ -70,7 +65,6 @@ func TestKubernetesParse(t *testing.T) { "", 1, 0, - defaultResyncPeriod, "", "", podModeDisabled, @@ -84,7 +78,6 @@ func TestKubernetesParse(t *testing.T) { "", 1, 1, - defaultResyncPeriod, "", "", podModeDisabled, @@ -98,35 +91,6 @@ func TestKubernetesParse(t *testing.T) { "", 1, 2, - defaultResyncPeriod, - "", - "", - podModeDisabled, - fall.Zero, - }, - { - `kubernetes coredns.local { - resyncperiod 30s -}`, - false, - "", - 1, - 0, - 30 * time.Second, - "", - "", - podModeDisabled, - fall.Zero, - }, - { - `kubernetes coredns.local { - resyncperiod 15m -}`, - false, - "", - 1, - 0, - 15 * time.Minute, "", "", podModeDisabled, @@ -140,7 +104,6 @@ func TestKubernetesParse(t *testing.T) { "", 1, 0, - defaultResyncPeriod, "environment=prod", "", podModeDisabled, @@ -154,7 +117,6 @@ func TestKubernetesParse(t *testing.T) { "", 1, 0, - defaultResyncPeriod, "application=nginx,environment in (production,qa,staging)", "", podModeDisabled, @@ -168,7 +130,6 @@ func TestKubernetesParse(t *testing.T) { "", 1, 0, - defaultResyncPeriod, "", "istio-injection=enabled", podModeDisabled, @@ -183,7 +144,6 @@ func TestKubernetesParse(t *testing.T) { "Error during parsing: namespaces and namespace_labels cannot both be set", -1, 0, - defaultResyncPeriod, "", "istio-injection=enabled", podModeDisabled, @@ -191,7 +151,6 @@ func TestKubernetesParse(t *testing.T) { }, { `kubernetes coredns.local test.local { - resyncperiod 15m 
endpoint http://localhost:8080 namespaces demo test labels environment in (production, staging, qa),application=nginx @@ -201,7 +160,6 @@ func TestKubernetesParse(t *testing.T) { "", 2, 2, - 15 * time.Minute, "application=nginx,environment in (production,qa,staging)", "", podModeDisabled, @@ -216,7 +174,6 @@ func TestKubernetesParse(t *testing.T) { "rong argument count or unexpected line ending", -1, -1, - defaultResyncPeriod, "", "", podModeDisabled, @@ -230,49 +187,6 @@ func TestKubernetesParse(t *testing.T) { "rong argument count or unexpected line ending", -1, -1, - defaultResyncPeriod, - "", - "", - podModeDisabled, - fall.Zero, - }, - { - `kubernetes coredns.local { - resyncperiod -}`, - true, - "rong argument count or unexpected line ending", - -1, - 0, - 0 * time.Minute, - "", - "", - podModeDisabled, - fall.Zero, - }, - { - `kubernetes coredns.local { - resyncperiod 15 -}`, - true, - "unable to parse resync duration value", - -1, - 0, - 0 * time.Second, - "", - "", - podModeDisabled, - fall.Zero, - }, - { - `kubernetes coredns.local { - resyncperiod abc -}`, - true, - "unable to parse resync duration value", - -1, - 0, - 0 * time.Second, "", "", podModeDisabled, @@ -286,7 +200,6 @@ func TestKubernetesParse(t *testing.T) { "rong argument count or unexpected line ending", -1, 0, - 0 * time.Second, "", "", podModeDisabled, @@ -300,7 +213,6 @@ func TestKubernetesParse(t *testing.T) { "unable to parse label selector", -1, 0, - 0 * time.Second, "", "", podModeDisabled, @@ -315,7 +227,6 @@ func TestKubernetesParse(t *testing.T) { "", 1, 0, - defaultResyncPeriod, "", "", podModeDisabled, @@ -330,7 +241,6 @@ func TestKubernetesParse(t *testing.T) { "", 1, 0, - defaultResyncPeriod, "", "", podModeInsecure, @@ -345,7 +255,6 @@ func TestKubernetesParse(t *testing.T) { "", 1, 0, - defaultResyncPeriod, "", "", podModeVerified, @@ -360,7 +269,6 @@ func TestKubernetesParse(t *testing.T) { "rong value for pods", -1, 0, - defaultResyncPeriod, "", "", podModeVerified, @@ 
-375,7 +283,6 @@ func TestKubernetesParse(t *testing.T) { "rong argument count", 1, 0, - defaultResyncPeriod, "", "", podModeDisabled, @@ -389,7 +296,6 @@ kubernetes cluster.local`, "this plugin", -1, 0, - defaultResyncPeriod, "", "", podModeDisabled, @@ -403,7 +309,6 @@ kubernetes cluster.local`, "Wrong argument count or unexpected line ending after", -1, 0, - defaultResyncPeriod, "", "", podModeDisabled, @@ -417,7 +322,6 @@ kubernetes cluster.local`, "Wrong argument count or unexpected line ending after", -1, 0, - defaultResyncPeriod, "", "", podModeDisabled, @@ -431,7 +335,6 @@ kubernetes cluster.local`, "", 1, 0, - defaultResyncPeriod, "", "", podModeDisabled, @@ -480,12 +383,6 @@ kubernetes cluster.local`, t.Errorf("Test %d: Expected kubernetes controller to be initialized with %d namespaces. Instead found %d namespaces: '%v' for input '%s'", i, test.expectedNSCount, foundNSCount, k8sController.Namespaces, test.input) } - // ResyncPeriod - foundResyncPeriod := k8sController.opts.resyncPeriod - if foundResyncPeriod != test.expectedResyncPeriod { - t.Errorf("Test %d: Expected kubernetes controller to be initialized with resync period '%s'. Instead found period '%s' for input '%s'", i, test.expectedResyncPeriod, foundResyncPeriod, test.input) - } - // Labels if k8sController.opts.labelSelector != nil { foundLabelSelectorString := meta.FormatLabelSelector(k8sController.opts.labelSelector) From 18304ce9b75a401ed510d270c013dc4fe6480420 Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Thu, 4 Jul 2019 06:56:37 +0100 Subject: [PATCH 005/324] plugin/file: make non-existent file non-fatal (#2955) * plugin/file: make non-existent file non-fatal If the zone file being loaded doesn't exist *and* reload is enabled, just wait the file to pop up in the normal Reload routine. If reload is set to 0s; we keep this a fatal error on startup. Aslo fix the ticker in z.Reload(): remove the per second ticks and just use the reload interval for the ticker. 
Brush up the documentation a bit as well. Fixes: #2951 Signed-off-by: Miek Gieben * Stickler and test compile Signed-off-by: Miek Gieben * Remove there too Signed-off-by: Miek Gieben * Cant README test these because zone files dont exist Signed-off-by: Miek Gieben --- plugin/file/README.md | 16 ++++++++++------ plugin/file/file.go | 9 ++++++--- plugin/file/reload.go | 12 +----------- plugin/file/reload_test.go | 1 - plugin/file/setup.go | 28 ++++++++++++++++++++-------- plugin/file/zone.go | 2 -- test/file_reload_test.go | 3 --- 7 files changed, 37 insertions(+), 34 deletions(-) diff --git a/plugin/file/README.md b/plugin/file/README.md index b8f9b50d5b5..5322a8ac62b 100644 --- a/plugin/file/README.md +++ b/plugin/file/README.md @@ -6,7 +6,7 @@ ## Description -The file plugin is used for an "old-style" DNS server. It serves from a preloaded file that exists +The *file* plugin is used for an "old-style" DNS server. It serves from a preloaded file that exists on disk. If the zone file contains signatures (i.e., is signed using DNSSEC), correct DNSSEC answers are returned. Only NSEC is supported! If you use this setup *you* are responsible for re-signing the zonefile. @@ -44,7 +44,7 @@ file DBFILE [ZONES... ] { Load the `example.org` zone from `example.org.signed` and allow transfers to the internet, but send notifies to 10.240.1.1 -~~~ corefile +~~~ txt example.org { file example.org.signed { transfer to * @@ -55,7 +55,7 @@ example.org { Or use a single zone file for multiple zones: -~~~ +~~~ txt . { file example.org.signed example.org example.net { transfer to * @@ -67,7 +67,7 @@ Or use a single zone file for multiple zones: Note that if you have a configuration like the following you may run into a problem of the origin not being correctly recognized: -~~~ +~~~ txt . { file db.example.org } @@ -78,7 +78,7 @@ which, in this case, is the root zone. Any contents of `db.example.org` will the origin set; this may or may not do what you want. 
It's better to be explicit here and specify the correct origin. This can be done in two ways: -~~~ +~~~ txt . { file db.example.org example.org } @@ -86,8 +86,12 @@ It's better to be explicit here and specify the correct origin. This can be done Or -~~~ +~~~ txt example.org { file db.example.org } ~~~ + +## Also See + +See the *loadbalance* plugin if you need simple record shuffling. diff --git a/plugin/file/file.go b/plugin/file/file.go index 2fe4b164476..bc582cfaa76 100644 --- a/plugin/file/file.go +++ b/plugin/file/file.go @@ -129,12 +129,15 @@ func Parse(f io.Reader, origin, fileName string, serial int64) (*Zone, error) { return nil, err } - if !seenSOA && serial >= 0 { + if !seenSOA { if s, ok := rr.(*dns.SOA); ok { - if s.Serial == uint32(serial) { // same serial + seenSOA = true + + // -1 is valid serial is we failed to load the file on startup. + + if serial >= 0 && s.Serial == uint32(serial) { // same serial return nil, &serialErr{err: "no change in SOA serial", origin: origin, zone: fileName, serial: serial} } - seenSOA = true } } diff --git a/plugin/file/reload.go b/plugin/file/reload.go index e73c5b87df9..ce5c81335a3 100644 --- a/plugin/file/reload.go +++ b/plugin/file/reload.go @@ -5,15 +5,12 @@ import ( "time" ) -// TickTime is clock resolution. By default ticks every second. Handler checks if reloadInterval has been reached on every tick. -var TickTime = 1 * time.Second - // Reload reloads a zone when it is changed on disk. If z.NoReload is true, no reloading will be done. 
func (z *Zone) Reload() error { if z.ReloadInterval == 0 { return nil } - tick := time.NewTicker(TickTime) + tick := time.NewTicker(z.ReloadInterval) go func() { @@ -21,13 +18,6 @@ func (z *Zone) Reload() error { select { case <-tick.C: - if z.LastReloaded.Add(z.ReloadInterval).After(time.Now()) { - //reload interval not reached yet - continue - } - //saving timestamp of last attempted reload - z.LastReloaded = time.Now() - zFile := z.File() reader, err := os.Open(zFile) if err != nil { diff --git a/plugin/file/reload_test.go b/plugin/file/reload_test.go index 1139b8a4410..196565cac95 100644 --- a/plugin/file/reload_test.go +++ b/plugin/file/reload_test.go @@ -29,7 +29,6 @@ func TestZoneReload(t *testing.T) { t.Fatalf("Failed to parse zone: %s", err) } - TickTime = 500 * time.Millisecond z.ReloadInterval = 500 * time.Millisecond z.Reload() time.Sleep(time.Second) diff --git a/plugin/file/setup.go b/plugin/file/setup.go index 38ba796217b..9be09fe8d54 100644 --- a/plugin/file/setup.go +++ b/plugin/file/setup.go @@ -57,6 +57,9 @@ func fileParse(c *caddy.Controller) (Zones, error) { config := dnsserver.GetConfig(c) + var openErr error + reload := 1 * time.Minute + for c.Next() { // file db.file [zones...] 
if !c.NextArg() { @@ -77,22 +80,23 @@ func fileParse(c *caddy.Controller) (Zones, error) { reader, err := os.Open(fileName) if err != nil { - // bail out - return Zones{}, err + openErr = err } for i := range origins { origins[i] = plugin.Host(origins[i]).Normalize() - zone, err := Parse(reader, origins[i], fileName, 0) - if err == nil { - z[origins[i]] = zone - } else { - return Zones{}, err + z[origins[i]] = NewZone(origins[i], fileName) + if openErr == nil { + zone, err := Parse(reader, origins[i], fileName, 0) + if err == nil { + z[origins[i]] = zone + } else { + return Zones{}, err + } } names = append(names, origins[i]) } - reload := 1 * time.Minute upstr := upstream.New() t := []string{} var e error @@ -129,5 +133,13 @@ func fileParse(c *caddy.Controller) (Zones, error) { } } } + if openErr != nil { + if reload == 0 { + // reload hasn't been set make this a fatal error + return Zones{}, plugin.Error("file", openErr) + } + log.Warningf("Failed to open %q: trying again in %s", openErr, reload) + + } return Zones{Z: z, Names: names}, nil } diff --git a/plugin/file/zone.go b/plugin/file/zone.go index b24cd91e89d..186fdf8d097 100644 --- a/plugin/file/zone.go +++ b/plugin/file/zone.go @@ -30,7 +30,6 @@ type Zone struct { Expired *bool ReloadInterval time.Duration - LastReloaded time.Time reloadMu sync.RWMutex reloadShutdown chan bool Upstream *upstream.Upstream // Upstream for looking up external names during the resolution process. 
@@ -53,7 +52,6 @@ func NewZone(name, file string) *Zone { Tree: &tree.Tree{}, Expired: new(bool), reloadShutdown: make(chan bool), - LastReloaded: time.Now(), } *z.Expired = false return z diff --git a/test/file_reload_test.go b/test/file_reload_test.go index 91372ecc302..e61003b8bee 100644 --- a/test/file_reload_test.go +++ b/test/file_reload_test.go @@ -5,15 +5,12 @@ import ( "testing" "time" - "github.com/coredns/coredns/plugin/file" "github.com/coredns/coredns/plugin/test" "github.com/miekg/dns" ) func TestZoneReload(t *testing.T) { - file.TickTime = 1 * time.Second - name, rm, err := test.TempFile(".", exampleOrg) if err != nil { t.Fatalf("Failed to create zone: %s", err) From 9b6e2aa5a6401a34b8aa5b66ce554caf5e9885ef Mon Sep 17 00:00:00 2001 From: Yong Tang Date: Thu, 4 Jul 2019 14:44:31 +0800 Subject: [PATCH 006/324] Update github.com/miekg/dns, github.com/aws/aws-sdk-go, and google.golang.org/grpc (#2960) This fix updates: - github.com/miekg/dns to 1.15 (#2947) - github.com/aws/aws-sdk-go to 1.20.13 (#2953) - google.golang.org/grpc to 1.22.0 (#2966) This fix fixes #2947 This fix fixes #2953 This fix fixes #2966 Signed-off-by: Yong Tang --- go.mod | 21 +++++++++------------ go.sum | 54 +++++++++++++++++++++++++++++++++++++----------------- 2 files changed, 46 insertions(+), 29 deletions(-) diff --git a/go.mod b/go.mod index c94db7a9062..2449a03e815 100644 --- a/go.mod +++ b/go.mod @@ -3,10 +3,10 @@ module github.com/coredns/coredns go 1.12 require ( - cloud.google.com/go v0.40.0 // indirect + cloud.google.com/go v0.41.0 // indirect github.com/Shopify/sarama v1.21.0 // indirect github.com/apache/thrift v0.12.0 // indirect - github.com/aws/aws-sdk-go v1.20.5 + github.com/aws/aws-sdk-go v1.20.15 github.com/caddyserver/caddy v1.0.1 github.com/coreos/bbolt v1.3.2 // indirect github.com/coreos/etcd v3.3.13+incompatible @@ -20,7 +20,6 @@ require ( 
github.com/gogo/protobuf v1.2.1 // indirect github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef // indirect github.com/golang/protobuf v1.3.1 - github.com/google/btree v1.0.0 // indirect github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf // indirect github.com/googleapis/gnostic v0.2.0 // indirect github.com/gophercloud/gophercloud v0.0.0-20190307220656-fe1ba5ce12dd // indirect @@ -32,7 +31,7 @@ require ( github.com/jonboulle/clockwork v0.1.0 // indirect github.com/konsorten/go-windows-terminal-sequences v1.0.2 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.1 - github.com/miekg/dns v1.1.14 + github.com/miekg/dns v1.1.15 github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492 // indirect github.com/opentracing/opentracing-go v1.1.0 github.com/openzipkin-contrib/zipkin-go-opentracing v0.3.5 // indirect @@ -52,13 +51,11 @@ require ( go.uber.org/atomic v1.3.2 // indirect go.uber.org/multierr v1.1.0 // indirect go.uber.org/zap v1.9.1 // indirect - golang.org/x/crypto v0.0.0-20190618222545-ea8f1a30c443 // indirect - golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b // indirect - golang.org/x/sys v0.0.0-20190618155005-516e3c20635f - golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 // indirect - google.golang.org/appengine v1.6.1 // indirect - google.golang.org/genproto v0.0.0-20190611190212-a7e196e89fd3 // indirect - google.golang.org/grpc v1.21.1 + golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 // indirect + golang.org/x/net v0.0.0-20190628185345-da137c7871d7 // indirect + golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb + google.golang.org/genproto v0.0.0-20190701230453-710ae3a149df // indirect + google.golang.org/grpc v1.22.0 gopkg.in/DataDog/dd-trace-go.v1 v1.15.0 gopkg.in/inf.v0 v0.9.1 // indirect k8s.io/api v0.0.0-20190313235455-40a48860b5ab @@ -70,4 +67,4 @@ require ( 
sigs.k8s.io/yaml v1.1.0 // indirect ) -replace github.com/miekg/dns v1.1.3 => github.com/miekg/dns v1.1.14 +replace github.com/miekg/dns v1.1.3 => github.com/miekg/dns v1.1.15 diff --git a/go.sum b/go.sum index 2ab83ac5905..dc12d00a7cb 100644 --- a/go.sum +++ b/go.sum @@ -1,9 +1,10 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.40.0 h1:FjSY7bOj+WzJe6TZRVtXI2b9kAYvtNg4lMbcH2+MUkk= -cloud.google.com/go v0.40.0/go.mod h1:Tk58MuI9rbLMKlAjeO/bDnteAx7tX2gJIXw4T5Jwlro= +cloud.google.com/go v0.41.0 h1:NFvqUTDnSNYPX5oReekmB+D+90jrJIcVImxQ3qrBVgM= +cloud.google.com/go v0.41.0/go.mod h1:OauMR7DV8fzvZIl2qg6rkaIhD/vmgk4iwEw/h6ercmg= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DataDog/zstd v1.3.5 h1:DtpNbljikUepEPD16hD4LvIcmhnhdLTiW/5pHgbmp14= github.com/DataDog/zstd v1.3.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= github.com/Shopify/sarama v1.21.0 h1:0GKs+e8mn1RRUzfg9oUXv3v7ZieQLmOZF/bfnmmGhM8= @@ -14,8 +15,8 @@ github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuy github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/apache/thrift v0.12.0 h1:pODnxUFNcjP9UTLZGTdeh+j16A8lJbRvD3rOtrk/7bs= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= -github.com/aws/aws-sdk-go v1.20.5 h1:Ytq5AxpA2pr4vRJM9onvgAjjVRZKKO63WStbG/jLHw0= -github.com/aws/aws-sdk-go v1.20.5/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= 
+github.com/aws/aws-sdk-go v1.20.15 h1:y9ts8MJhB7ReUidS6Rq+0KxdFeL01J+pmOlGq6YqpiQ= +github.com/aws/aws-sdk-go v1.20.15/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= @@ -76,6 +77,7 @@ github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef h1:veQD95Isof8w9 github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -90,9 +92,11 @@ github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf h1:+RRA9JqSOZFfKrOeq github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= github.com/google/uuid 
v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gnostic v0.2.0 h1:l6N3VoaVzTncYYW+9yOz2LJJammFZGBO13sqgEhpy9g= github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/gophercloud/gophercloud v0.0.0-20190307220656-fe1ba5ce12dd h1:tkA3C/XTk8iACLOlTez37pL+0iGSYkkRGKdXgJ6ZylM= @@ -149,8 +153,8 @@ github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0j github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mholt/certmagic v0.6.2-0.20190624175158-6a42ef9fe8c2 h1:xKE9kZ5C8gelJC3+BNM6LJs1x21rivK7yxfTZMAuY2s= github.com/mholt/certmagic v0.6.2-0.20190624175158-6a42ef9fe8c2/go.mod h1:g4cOPxcjV0oFq3qwpjSA30LReKD8AoIfwAY9VvG35NY= -github.com/miekg/dns v1.1.14 h1:wkQWn9wIp4mZbwW8XV6Km6owkvRPbOiV004ZM2CkGvA= -github.com/miekg/dns v1.1.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/miekg/dns v1.1.15 h1:CSSIDtllwGLMoA6zjdKnaE6Tx6eVUxQ29LUgGetiDCI= +github.com/miekg/dns v1.1.15/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= @@ -219,6 +223,7 @@ github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q go.etcd.io/bbolt v1.3.2 h1:Z/90sZLPOeCy2PwprqkFa25PdkusRzaj9P8zm/KNyvk= go.etcd.io/bbolt v1.3.2/go.mod 
h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.uber.org/atomic v1.3.2 h1:2Oa65PReHzfn29GpvgsYwloV9AVFHPDk8tYxt2c2tr4= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= @@ -231,14 +236,17 @@ golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190228161510-8dd112bcdc25/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190618222545-ea8f1a30c443 h1:IcSOAf4PyMp3U3XbIEj1/xJ2BjNN2jWv7JoyOsMxXUU= -golang.org/x/crypto v0.0.0-20190618222545-ea8f1a30c443/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 h1:HuIa8hRrWRSrqYzx1qI49NNxhdi2PrY7gxVSq1JjLDc= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint 
v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -250,12 +258,14 @@ golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190328230028-74de082e2cca/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65 h1:+rhAzEzT3f4JtomfC371qB+0Ola2caSKcY69NUBZrRQ= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b h1:lkjdUzSyJ5P1+eal9fxXX9Xg2BTfswsonKUse48C0uE= -golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7 
h1:rTIdg5QFRR7XCaK4LCjBiPbx8j4DQRpdYMnGn/bJUEU= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= @@ -275,12 +285,15 @@ golang.org/x/sys v0.0.0-20190124100055-b90733256f2e/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190618155005-516e3c20635f h1:dHNZYIYdq2QuU6w73vZ/DzesPbVlZVYZTtTZmrnsbQ8= -golang.org/x/sys v0.0.0-20190618155005-516e3c20635f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb h1:fgwFCsaw9buMuxNd6+DQfAuSFqbNiQZpcgJQAgJsK6k= +golang.org/x/sys 
v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= @@ -293,11 +306,15 @@ golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190624190245-7f2218787638/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.6.0/go.mod h1:btoxGiFvQNVUZQ8W08zLtrVS08CNpINPEfxXxgJL1Q4= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= 
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -306,15 +323,17 @@ google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190530194941-fb225487d101 h1:wuGevabY6r+ivPNagjUXGGxF+GqgMd+dBhjsxW4q9u4= -google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= -google.golang.org/genproto v0.0.0-20190611190212-a7e196e89fd3 h1:0LGHEA/u5XLibPOx6D7D8FBT/ax6wT57vNKY0QckCwo= -google.golang.org/genproto v0.0.0-20190611190212-a7e196e89fd3/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= +google.golang.org/genproto v0.0.0-20190626174449-989357319d63/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= +google.golang.org/genproto v0.0.0-20190701230453-710ae3a149df h1:k3DT34vxk64+4bD5x+fRy6U0SXxZehzUHRSYUJcKfII= +google.golang.org/genproto v0.0.0-20190701230453-710ae3a149df/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1 h1:j6XxA85m/6txkUCHvzlV5f+HBNl/1r5cZ2A/3IEFOO8= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.22.0 
h1:J0UbZOIrCAl+fpTOf8YLs4dJo8L/owV4LYVtAXQoPkw= +google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= gopkg.in/DataDog/dd-trace-go.v1 v1.15.0 h1:2LhklnAJsRSelbnBrrE5QuRleRDkmOh2JWxOtIX6yec= gopkg.in/DataDog/dd-trace-go.v1 v1.15.0/go.mod h1:DVp8HmDh8PuTu2Z0fVVlBsyWaC++fzwVCaGWylTe3tg= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= @@ -339,6 +358,7 @@ gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= k8s.io/api v0.0.0-20190313235455-40a48860b5ab h1:DG9A67baNpoeweOy2spF1OWHhnVY5KR7/Ek/+U1lVZc= k8s.io/api v0.0.0-20190313235455-40a48860b5ab/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA= k8s.io/apimachinery v0.0.0-20190313205120-d7deff9243b1 h1:IS7K02iBkQXpCeieSiyJjGoLSdVOv2DbPaWHJ+ZtgKg= From f4a6bea4b414702870561f10cb45321591af378b Mon Sep 17 00:00:00 2001 From: Yong Tang Date: Thu, 4 Jul 2019 14:50:49 +0800 Subject: [PATCH 007/324] gofmt fix for `No newline at end of file` (#2973) This fix fixes the `\ No newline at end of file` in plugin/chaos/zowners.go, by adding `"\n"` to the end of owners_generate.go. 
Also fixes a gofmt issue in plugin/etcd/setup.go Signed-off-by: Yong Tang --- owners_generate.go | 2 ++ plugin/chaos/zowners.go | 2 +- plugin/etcd/setup.go | 2 +- 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/owners_generate.go b/owners_generate.go index e198f435f85..9764179a0be 100644 --- a/owners_generate.go +++ b/owners_generate.go @@ -58,6 +58,8 @@ var Owners = []string{` } golist += fmt.Sprintf("%q%s", a, c) } + // to prevent `No newline at end of file` with gofmt + golist += "\n" if err := ioutil.WriteFile("plugin/chaos/zowners.go", []byte(golist), 0644); err != nil { log.Fatal(err) diff --git a/plugin/chaos/zowners.go b/plugin/chaos/zowners.go index 7d5f296caf5..d747069dc7d 100644 --- a/plugin/chaos/zowners.go +++ b/plugin/chaos/zowners.go @@ -1,4 +1,4 @@ package chaos // Owners are all GitHub handlers of all maintainers. -var Owners = []string{"bradbeam", "chrisohaver", "dilyevsky", "ekleiner", "fastest963", "greenpau", "grobie", "inigohu", "isolus", "johnbelamaric", "miekg", "nchrisdk", "nitisht", "pmoroney", "rajansandeep", "rdrozhdzh", "rtreffer", "stp-ip", "superq", "varyoo", "yongtang"} \ No newline at end of file +var Owners = []string{"bradbeam", "chrisohaver", "dilyevsky", "ekleiner", "fastest963", "greenpau", "grobie", "inigohu", "isolus", "johnbelamaric", "miekg", "nchrisdk", "nitisht", "pmoroney", "rajansandeep", "rdrozhdzh", "rtreffer", "stp-ip", "superq", "varyoo", "yongtang"} diff --git a/plugin/etcd/setup.go b/plugin/etcd/setup.go index 010833bbc86..18d3340c569 100644 --- a/plugin/etcd/setup.go +++ b/plugin/etcd/setup.go @@ -9,8 +9,8 @@ import ( mwtls "github.com/coredns/coredns/plugin/pkg/tls" "github.com/coredns/coredns/plugin/pkg/upstream" - etcdcv3 "github.com/coreos/etcd/clientv3" "github.com/caddyserver/caddy" + etcdcv3 "github.com/coreos/etcd/clientv3" ) var log = clog.NewWithPlugin("etcd") From e1ab8a8ce2f25d860bad4aa1e65fd258c9e0b9a4 Mon Sep 17 00:00:00 2001 From: 
"dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Thu, 11 Jul 2019 06:31:03 +0000 Subject: [PATCH 008/324] build(deps): bump github.com/aws/aws-sdk-go from 1.20.15 to 1.20.17 (#2990) Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.20.15 to 1.20.17. - [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Changelog](https://github.com/aws/aws-sdk-go/blob/master/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.20.15...v1.20.17) Signed-off-by: dependabot-preview[bot] --- go.mod | 2 +- go.sum | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index 2449a03e815..4b4809ef63c 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,7 @@ require ( cloud.google.com/go v0.41.0 // indirect github.com/Shopify/sarama v1.21.0 // indirect github.com/apache/thrift v0.12.0 // indirect - github.com/aws/aws-sdk-go v1.20.15 + github.com/aws/aws-sdk-go v1.20.17 github.com/caddyserver/caddy v1.0.1 github.com/coreos/bbolt v1.3.2 // indirect github.com/coreos/etcd v3.3.13+incompatible diff --git a/go.sum b/go.sum index dc12d00a7cb..94ecb1ba375 100644 --- a/go.sum +++ b/go.sum @@ -17,6 +17,8 @@ github.com/apache/thrift v0.12.0 h1:pODnxUFNcjP9UTLZGTdeh+j16A8lJbRvD3rOtrk/7bs= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/aws/aws-sdk-go v1.20.15 h1:y9ts8MJhB7ReUidS6Rq+0KxdFeL01J+pmOlGq6YqpiQ= github.com/aws/aws-sdk-go v1.20.15/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.20.17 h1:ZQ0cM9FpuufVDHlNViD8vD68IkEQL/VUOMwJPBqkaaM= +github.com/aws/aws-sdk-go v1.20.17/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= 
github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= From 1ee9b534abc9b00ca336e2732c0b955dc8a185f1 Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Thu, 11 Jul 2019 06:32:57 +0000 Subject: [PATCH 009/324] build(deps): bump github.com/golang/protobuf from 1.3.1 to 1.3.2 (#2989) Bumps [github.com/golang/protobuf](https://github.com/golang/protobuf) from 1.3.1 to 1.3.2. - [Release notes](https://github.com/golang/protobuf/releases) - [Commits](https://github.com/golang/protobuf/compare/v1.3.1...v1.3.2) Signed-off-by: dependabot-preview[bot] --- go.mod | 2 +- go.sum | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index 4b4809ef63c..7c0ae4d14cc 100644 --- a/go.mod +++ b/go.mod @@ -19,7 +19,7 @@ require ( github.com/farsightsec/golang-framestream v0.0.0-20181102145529-8a0cb8ba8710 github.com/gogo/protobuf v1.2.1 // indirect github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef // indirect - github.com/golang/protobuf v1.3.1 + github.com/golang/protobuf v1.3.2 github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf // indirect github.com/googleapis/gnostic v0.2.0 // indirect github.com/gophercloud/gophercloud v0.0.0-20190307220656-fe1ba5ce12dd // indirect diff --git a/go.sum b/go.sum index 94ecb1ba375..2bd909f9938 100644 --- a/go.sum +++ b/go.sum @@ -83,6 +83,8 @@ github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFU github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= github.com/golang/protobuf v1.3.1/go.mod 
h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db h1:woRePGFeVFfLKN/pOkfl+p/TAqKOfFu+7KPlMVpok/w= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= From a5c405f6d7a019e8dec46eabdcb801a8870a38a1 Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Thu, 11 Jul 2019 12:54:47 +0000 Subject: [PATCH 010/324] Move to dns.Truncate (#2942) Ditch our truncation code and use the upstream one in miekg/dns. This saves code on our end, end upstream is also more efficient as every RR is Len-ed only once. With our bin-search this is not guaranteed. Signed-off-by: Miek Gieben --- request/request.go | 102 +++------------------------------------------ request/writer.go | 7 ++-- 2 files changed, 9 insertions(+), 100 deletions(-) diff --git a/request/request.go b/request/request.go index 0ec98b310eb..1448cad6512 100644 --- a/request/request.go +++ b/request/request.go @@ -226,22 +226,17 @@ func (r *Request) SizeAndDo(m *dns.Msg) bool { // Scrub scrubs the reply message so that it will fit the client's buffer. It will first // check if the reply fits without compression and then *with* compression. -// Scrub will then use binary search to find a save cut off point in the additional section. -// If even *without* the additional section the reply still doesn't fit we -// repeat this process for the answer section. If we scrub the answer section -// we set the TC bit on the reply; indicating the client should retry over TCP. 
// Note, the TC bit will be set regardless of protocol, even TCP message will // get the bit, the client should then retry with pigeons. func (r *Request) Scrub(reply *dns.Msg) *dns.Msg { - size := r.Size() + reply.Truncate(r.Size()) - reply.Compress = false - rl := reply.Len() - if size >= rl { - if r.Proto() != "udp" { - return reply - } + if reply.Compress { + return reply + } + if r.Proto() == "udp" { + rl := reply.Len() // Last ditch attempt to avoid fragmentation, if the size is bigger than the v4/v6 UDP fragmentation // limit and sent via UDP compress it (in the hope we go under that limit). Limits taken from NSD: // @@ -254,91 +249,8 @@ func (r *Request) Scrub(reply *dns.Msg) *dns.Msg { if rl > 1220 && r.Family() == 2 { reply.Compress = true } - - return reply - } - - reply.Compress = true - rl = reply.Len() - if size >= rl { - return reply - } - - // Account for the OPT record that gets added in SizeAndDo(), subtract that length. - re := len(reply.Extra) - if r.Req.IsEdns0() != nil { - size -= optLen - // re can never be 0 because we have an OPT RR. - re-- - } - - l, m := 0, 0 - origExtra := reply.Extra - for l <= re { - m = (l + re) / 2 - reply.Extra = origExtra[:m] - rl = reply.Len() - if rl < size { - l = m + 1 - continue - } - if rl > size { - re = m - 1 - continue - } - if rl == size { - break - } - } - - // The binary search only breaks on an exact match, which will be - // pretty rare. Normally, the loop will exit when l > re, meaning that - // in the previous iteration either: - // rl < size: no need to do anything. - // rl > size: the final size is too large, and if m > 0, the preceding - // iteration the size was too small. Select that preceding size. 
- if rl > size && m > 0 { - reply.Extra = origExtra[:m-1] - rl = reply.Len() - } - - if rl <= size { - return reply - } - - ra := len(reply.Answer) - l, m = 0, 0 - origAnswer := reply.Answer - for l <= ra { - m = (l + ra) / 2 - reply.Answer = origAnswer[:m] - rl = reply.Len() - if rl < size { - l = m + 1 - continue - } - if rl > size { - ra = m - 1 - continue - } - if rl == size { - break - } } - // The binary search only breaks on an exact match, which will be - // pretty rare. Normally, the loop will exit when l > ra, meaning that - // in the previous iteration either: - // rl < size: no need to do anything. - // rl > size: the final size is too large, and if m > 0, the preceding - // iteration the size was too small. Select that preceding size. - if rl > size && m > 0 { - reply.Answer = origAnswer[:m-1] - // No need to recalc length, as we don't use it. We set truncated anyway. Doing - // this extra m-1 step does make it fit in the client's buffer however. - } - - reply.Truncated = true return reply } @@ -460,5 +372,3 @@ func (r *Request) Match(reply *dns.Msg) bool { return true } - -const optLen = 12 // OPT record length. diff --git a/request/writer.go b/request/writer.go index 6caba0c2e62..587b3b5d811 100644 --- a/request/writer.go +++ b/request/writer.go @@ -15,8 +15,7 @@ func NewScrubWriter(req *dns.Msg, w dns.ResponseWriter) *ScrubWriter { return &S // scrub on the message m and will then write it to the client. 
func (s *ScrubWriter) WriteMsg(m *dns.Msg) error { state := Request{Req: s.req, W: s.ResponseWriter} - - n := state.Scrub(m) - state.SizeAndDo(n) - return s.ResponseWriter.WriteMsg(n) + state.SizeAndDo(m) + state.Scrub(m) + return s.ResponseWriter.WriteMsg(m) } From 69a2397bf56c50959a14fe770e82e45a80bf4416 Mon Sep 17 00:00:00 2001 From: Ruslan Drozhdzh <30860269+rdrozhdzh@users.noreply.github.com> Date: Fri, 12 Jul 2019 15:22:45 +0300 Subject: [PATCH 011/324] plugin/rewrite: fix domain length validation (#2995) * unit test * fix domain length validation * code optimization * remove unit test --- plugin/rewrite/name.go | 15 --------------- plugin/rewrite/rewrite.go | 7 +++---- 2 files changed, 3 insertions(+), 19 deletions(-) diff --git a/plugin/rewrite/name.go b/plugin/rewrite/name.go index 86a5f1ecd91..e1c2a11143c 100644 --- a/plugin/rewrite/name.go +++ b/plugin/rewrite/name.go @@ -9,8 +9,6 @@ import ( "github.com/coredns/coredns/plugin" "github.com/coredns/coredns/request" - - "github.com/miekg/dns" ) type exactNameRule struct { @@ -264,19 +262,6 @@ func (rule *substringNameRule) GetResponseRule() ResponseRule { return ResponseR // GetResponseRule return a rule to rewrite the response with. func (rule *regexNameRule) GetResponseRule() ResponseRule { return rule.ResponseRule } -// validName returns true if s is valid domain name and shorter than 256 characters. -func validName(s string) bool { - _, ok := dns.IsDomainName(s) - if !ok { - return false - } - if len(dns.Name(s).String()) > 255 { - return false - } - - return true -} - // hasClosingDot return true if s has a closing dot at the end. 
func hasClosingDot(s string) bool { if strings.HasSuffix(s, ".") { diff --git a/plugin/rewrite/rewrite.go b/plugin/rewrite/rewrite.go index 90652332658..e72e975b26a 100644 --- a/plugin/rewrite/rewrite.go +++ b/plugin/rewrite/rewrite.go @@ -44,11 +44,10 @@ func (rw Rewrite) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg for _, rule := range rw.Rules { switch result := rule.Rewrite(ctx, state); result { case RewriteDone: - if !validName(state.Req.Question[0].Name) { - x := state.Req.Question[0].Name - log.Errorf("Invalid name after rewrite: %s", x) + if _, ok := dns.IsDomainName(state.Req.Question[0].Name); !ok { + err := fmt.Errorf("invalid name after rewrite: %s", state.Req.Question[0].Name) state.Req.Question[0] = wr.originalQuestion - return dns.RcodeServerFailure, fmt.Errorf("invalid name after rewrite: %s", x) + return dns.RcodeServerFailure, err } respRule := rule.GetResponseRule() if respRule.Active { From 2213a12d21a9a95e2246ebe6bb3e47778f57dae8 Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Fri, 12 Jul 2019 12:33:27 +0000 Subject: [PATCH 012/324] Scrub: TC bit is always set (#3001) miekg/dns recently became more aggressive in setting TC; anything that's chopped of a response results in TC. Amend the tests. Disputable if these TC checks still add something. 
Signed-off-by: Miek Gieben --- request/request_test.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/request/request_test.go b/request/request_test.go index 4411c6a8246..a62fc51bfc1 100644 --- a/request/request_test.go +++ b/request/request_test.go @@ -98,8 +98,8 @@ func TestRequestScrubExtra(t *testing.T) { if want, got := req.Size(), reply.Len(); want < got { t.Errorf("Want scrub to reduce message length below %d bytes, got %d bytes", want, got) } - if reply.Truncated { - t.Errorf("Want scrub to not set truncated bit") + if !reply.Truncated { + t.Errorf("Want scrub to set truncated bit") } } @@ -120,8 +120,8 @@ func TestRequestScrubExtraEdns0(t *testing.T) { if want, got := req.Size(), reply.Len(); want < got { t.Errorf("Want scrub to reduce message length below %d bytes, got %d bytes", want, got) } - if reply.Truncated { - t.Errorf("Want scrub to not set truncated bit") + if !reply.Truncated { + t.Errorf("Want scrub to set truncated bit") } } @@ -146,8 +146,8 @@ func TestRequestScrubExtraRegression(t *testing.T) { if want, got := req.Size(), reply.Len(); want < got { t.Errorf("Want scrub to reduce message length below %d bytes, got %d bytes", want, got) } - if reply.Truncated { - t.Errorf("Want scrub to not set truncated bit") + if !reply.Truncated { + t.Errorf("Want scrub to set truncated bit") } } From db417f97af5e4c9d694be143c04d0be03fafa1a7 Mon Sep 17 00:00:00 2001 From: Charlie Vieth Date: Sun, 14 Jul 2019 04:44:00 -0400 Subject: [PATCH 013/324] rewrite: remove condition file (#3003) Nothing it declares is used by CoreDNS and it does not appear to be used by any third-party packages (according to godoc.org). 
--- plugin/rewrite/condition.go | 111 ------------------------------------ 1 file changed, 111 deletions(-) delete mode 100644 plugin/rewrite/condition.go diff --git a/plugin/rewrite/condition.go b/plugin/rewrite/condition.go deleted file mode 100644 index 0d9e4b18ecb..00000000000 --- a/plugin/rewrite/condition.go +++ /dev/null @@ -1,111 +0,0 @@ -package rewrite - -import ( - "context" - "fmt" - "regexp" - "strings" - - "github.com/coredns/coredns/plugin/pkg/replacer" - "github.com/coredns/coredns/request" - - "github.com/miekg/dns" -) - -// Operators that are defined. -const ( - Is = "is" - Not = "not" - Has = "has" - NotHas = "not_has" - StartsWith = "starts_with" - EndsWith = "ends_with" - Match = "match" - NotMatch = "not_match" -) - -var repl = replacer.New() - -// condition is a rewrite condition. -type condition func(string, string) bool - -var conditions = map[string]condition{ - Is: isFunc, - Not: notFunc, - Has: hasFunc, - NotHas: notHasFunc, - StartsWith: startsWithFunc, - EndsWith: endsWithFunc, - Match: matchFunc, - NotMatch: notMatchFunc, -} - -// isFunc is condition for Is operator. It checks for equality. -func isFunc(a, b string) bool { return a == b } - -// notFunc is condition for Not operator. It checks for inequality. -func notFunc(a, b string) bool { return a != b } - -// hasFunc is condition for Has operator. It checks if b is a substring of a. -func hasFunc(a, b string) bool { return strings.Contains(a, b) } - -// notHasFunc is condition for NotHas operator. It checks if b is not a substring of a. -func notHasFunc(a, b string) bool { return !strings.Contains(a, b) } - -// startsWithFunc is condition for StartsWith operator. It checks if b is a prefix of a. -func startsWithFunc(a, b string) bool { return strings.HasPrefix(a, b) } - -// endsWithFunc is condition for EndsWith operator. It checks if b is a suffix of a. 
-func endsWithFunc(a, b string) bool { - // TODO(miek): IsSubDomain - return strings.HasSuffix(a, b) -} - -// matchFunc is condition for Match operator. It does regexp matching of a against pattern in b -// and returns if they match. -func matchFunc(a, b string) bool { - matched, _ := regexp.MatchString(b, a) - return matched -} - -// notMatchFunc is condition for NotMatch operator. It does regexp matching of a against pattern in b -// and returns if they do not match. -func notMatchFunc(a, b string) bool { - matched, _ := regexp.MatchString(b, a) - return !matched -} - -// If is statement for a rewrite condition. -type If struct { - A string - Operator string - B string -} - -// True returns true if the condition is true and false otherwise. -// If r is not nil, it replaces placeholders before comparison. -func (i If) True(r *dns.Msg) bool { - if c, ok := conditions[i.Operator]; ok { - a, b := i.A, i.B - if r != nil { - ctx := context.TODO() - state := request.Request{Req: r, W: nil} // hmm W nil? - a = repl.Replace(ctx, state, nil, i.A) - b = repl.Replace(ctx, state, nil, i.B) - } - return c(a, b) - } - return false -} - -// NewIf creates a new If condition. -func NewIf(a, operator, b string) (If, error) { - if _, ok := conditions[operator]; !ok { - return If{}, fmt.Errorf("invalid operator %v", operator) - } - return If{ - A: a, - Operator: operator, - B: b, - }, nil -} From 2874c963c163008af445a40af7012bcaa577ca2d Mon Sep 17 00:00:00 2001 From: Jintao Zhang Date: Mon, 15 Jul 2019 13:51:08 +0800 Subject: [PATCH 014/324] doc:remove duplicate word (#3005) Signed-off-by: Jintao Zhang --- plugin/route53/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugin/route53/README.md b/plugin/route53/README.md index d3a1b630c12..8a7863081b8 100644 --- a/plugin/route53/README.md +++ b/plugin/route53/README.md @@ -49,7 +49,7 @@ route53 [ZONE:HOSTED_ZONE_ID...] 
{ ## Examples -Enable route53 with implicit AWS credentials and and resolve CNAMEs via 10.0.0.1: +Enable route53 with implicit AWS credentials and resolve CNAMEs via 10.0.0.1: ~~~ txt . { From 0674325efb2c1de77342ee038605163b71bd48ce Mon Sep 17 00:00:00 2001 From: Anshul Sharma Date: Mon, 15 Jul 2019 08:56:28 +0300 Subject: [PATCH 015/324] plugin/route53: fix IAM credential file (#2983) - Fix the ability for CoreDNS to fetch credentials via IAM --- plugin/route53/setup.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/plugin/route53/setup.go b/plugin/route53/setup.go index 6eb16586315..1872dce4e35 100644 --- a/plugin/route53/setup.go +++ b/plugin/route53/setup.go @@ -12,6 +12,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds" + "github.com/aws/aws-sdk-go/aws/ec2metadata" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/route53" "github.com/aws/aws-sdk-go/service/route53/route53iface" @@ -100,7 +102,9 @@ func setup(c *caddy.Controller, f func(*credentials.Credentials) route53iface.Ro return c.Errf("unknown property '%s'", c.Val()) } } - providers = append(providers, &credentials.EnvProvider{}, sharedProvider) + providers = append(providers, &credentials.EnvProvider{}, sharedProvider, &ec2rolecreds.EC2RoleProvider{ + Client: ec2metadata.New(session.New(&aws.Config{})), + }) client := f(credentials.NewChainCredentials(providers)) ctx := context.Background() h, err := New(ctx, client, keys, up) From 767d3998770a9bb7dac3ec7401c63d59b12bc215 Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Tue, 16 Jul 2019 06:47:01 +0000 Subject: [PATCH 016/324] build(deps): bump github.com/aws/aws-sdk-go from 1.20.17 to 1.20.20 (#3006) Bumps 
[github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.20.17 to 1.20.20. - [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Changelog](https://github.com/aws/aws-sdk-go/blob/master/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.20.17...v1.20.20) Signed-off-by: dependabot-preview[bot] --- go.mod | 2 +- go.sum | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index 7c0ae4d14cc..4a1a3b87d24 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,7 @@ require ( cloud.google.com/go v0.41.0 // indirect github.com/Shopify/sarama v1.21.0 // indirect github.com/apache/thrift v0.12.0 // indirect - github.com/aws/aws-sdk-go v1.20.17 + github.com/aws/aws-sdk-go v1.20.20 github.com/caddyserver/caddy v1.0.1 github.com/coreos/bbolt v1.3.2 // indirect github.com/coreos/etcd v3.3.13+incompatible diff --git a/go.sum b/go.sum index 2bd909f9938..d13a72c9aa8 100644 --- a/go.sum +++ b/go.sum @@ -19,6 +19,8 @@ github.com/aws/aws-sdk-go v1.20.15 h1:y9ts8MJhB7ReUidS6Rq+0KxdFeL01J+pmOlGq6Yqpi github.com/aws/aws-sdk-go v1.20.15/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.20.17 h1:ZQ0cM9FpuufVDHlNViD8vD68IkEQL/VUOMwJPBqkaaM= github.com/aws/aws-sdk-go v1.20.17/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.20.20 h1:OAR/GtjMOhenkp1NNKr1N1FgIP3mQXHeGbRhvVIAQp0= +github.com/aws/aws-sdk-go v1.20.20/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= From 21e9c6047b5bb4af4b798229828630fdfe633a7e Mon Sep 17 
00:00:00 2001 From: Charlie Vieth Date: Tue, 16 Jul 2019 15:00:22 -0400 Subject: [PATCH 017/324] Fix log plugin benchmark and slightly improve performance (#3004) * log: use ioutil.Discard as write buffer in benchmark Using a buffer gives unrealistic stats and consumes a large amount of memory. * log: lazily check if a msg should be logged * log: improve variable name Change 'ok' to the more descriptive 'shouldLog'. * log: code comments: don't reuse variable --- plugin/log/log.go | 12 ++++++++---- plugin/log/log_test.go | 4 ++-- 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/plugin/log/log.go b/plugin/log/log.go index 49581dfc49e..7e62011387a 100644 --- a/plugin/log/log.go +++ b/plugin/log/log.go @@ -26,20 +26,24 @@ type Logger struct { // ServeDNS implements the plugin.Handler interface. func (l Logger) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (int, error) { state := request.Request{W: w, Req: r} + name := state.Name() for _, rule := range l.Rules { - if !plugin.Name(rule.NameScope).Matches(state.Name()) { + if !plugin.Name(rule.NameScope).Matches(name) { continue } rrw := dnstest.NewRecorder(w) rc, err := plugin.NextOrFailure(l.Name(), l.Next, ctx, rrw, r) - tpe, _ := response.Typify(rrw.Msg, time.Now().UTC()) - class := response.Classify(tpe) // If we don't set up a class in config, the default "all" will be added // and we shouldn't have an empty rule.Class. 
_, ok := rule.Class[response.All] - _, ok1 := rule.Class[class] + var ok1 bool + if !ok { + tpe, _ := response.Typify(rrw.Msg, time.Now().UTC()) + class := response.Classify(tpe) + _, ok1 = rule.Class[class] + } if ok || ok1 { logstr := l.repl.Replace(ctx, state, rrw, rule.Format) clog.Infof(logstr) diff --git a/plugin/log/log_test.go b/plugin/log/log_test.go index e7f29fff12e..16efb2026f2 100644 --- a/plugin/log/log_test.go +++ b/plugin/log/log_test.go @@ -3,6 +3,7 @@ package log import ( "bytes" "context" + "io/ioutil" "log" "strings" "testing" @@ -239,8 +240,7 @@ func TestLogged(t *testing.T) { } func BenchmarkLogged(b *testing.B) { - var f bytes.Buffer - log.SetOutput(&f) + log.SetOutput(ioutil.Discard) rule := Rule{ NameScope: ".", From a2af651ecb5565c495d8f670ee740045d188dd1e Mon Sep 17 00:00:00 2001 From: Charlie Vieth Date: Wed, 17 Jul 2019 02:57:46 -0400 Subject: [PATCH 018/324] replacer: evaluate format once and improve perf by ~3x (#3002) * replacer: evaluate format once and improve perf by ~3x This improves the performance of logging by almost 3x and reduces memory usage by ~8x. Benchmark results: benchmark old ns/op new ns/op delta BenchmarkReplacer-12 644 324 -49.69% BenchmarkReplacer_CommonLogFormat-12 4228 1471 -65.21% benchmark old allocs new allocs delta BenchmarkReplacer-12 8 2 -75.00% BenchmarkReplacer_CommonLogFormat-12 51 17 -66.67% benchmark old bytes new bytes delta BenchmarkReplacer-12 240 48 -80.00% BenchmarkReplacer_CommonLogFormat-12 3723 446 -88.02% * replacer: code review comments * bufPool: document why we use a pointer to a slice * parseFormat: fix confusing comment * TestParseFormat_Nodes: rename to TestParseFormatNodes * replacer: use a value for bufPool instead of a pointer * replacer: remove comment * replacer: replace labels with knownLabels The previous slice of label names is no longer needed. 
--- plugin/pkg/replacer/replacer.go | 328 ++++++++++++++++----------- plugin/pkg/replacer/replacer_test.go | 207 ++++++++++++++++- 2 files changed, 404 insertions(+), 131 deletions(-) diff --git a/plugin/pkg/replacer/replacer.go b/plugin/pkg/replacer/replacer.go index 3e2cbfcbd61..81261ca799a 100644 --- a/plugin/pkg/replacer/replacer.go +++ b/plugin/pkg/replacer/replacer.go @@ -4,6 +4,7 @@ import ( "context" "strconv" "strings" + "sync" "time" "github.com/coredns/coredns/plugin/metadata" @@ -14,195 +15,262 @@ import ( ) // Replacer replaces labels for values in strings. -type Replacer struct { - valueFunc func(request.Request, *dnstest.Recorder, string) string - labels []string +type Replacer struct{} + +// New makes a new replacer. This only needs to be called once in the setup and +// then call Replace for each incoming message. A replacer is safe for concurrent use. +func New() Replacer { + return Replacer{} +} + +// Replace performs a replacement of values on s and returns the string with the replaced values. +func (r Replacer) Replace(ctx context.Context, state request.Request, rr *dnstest.Recorder, s string) string { + return loadFormat(s).Replace(ctx, state, rr) } +const ( + headerReplacer = "{>" + // EmptyValue is the default empty value. + EmptyValue = "-" +) + // labels are all supported labels that can be used in the default Replacer. -var labels = []string{ - "{type}", - "{name}", - "{class}", - "{proto}", - "{size}", - "{remote}", - "{port}", - "{local}", +var labels = map[string]struct{}{ + "{type}": {}, + "{name}": {}, + "{class}": {}, + "{proto}": {}, + "{size}": {}, + "{remote}": {}, + "{port}": {}, + "{local}": {}, // Header values. - headerReplacer + "id}", - headerReplacer + "opcode}", - headerReplacer + "do}", - headerReplacer + "bufsize}", + headerReplacer + "id}": {}, + headerReplacer + "opcode}": {}, + headerReplacer + "do}": {}, + headerReplacer + "bufsize}": {}, // Recorded replacements. 
- "{rcode}", - "{rsize}", - "{duration}", - headerReplacer + "rflags}", + "{rcode}": {}, + "{rsize}": {}, + "{duration}": {}, + headerReplacer + "rflags}": {}, } -// value returns the current value of label. -func value(state request.Request, rr *dnstest.Recorder, label string) string { +// appendValue appends the current value of label. +func appendValue(b []byte, state request.Request, rr *dnstest.Recorder, label string) []byte { switch label { case "{type}": - return state.Type() + return append(b, state.Type()...) case "{name}": - return state.Name() + return append(b, state.Name()...) case "{class}": - return state.Class() + return append(b, state.Class()...) case "{proto}": - return state.Proto() + return append(b, state.Proto()...) case "{size}": - return strconv.Itoa(state.Req.Len()) + return strconv.AppendInt(b, int64(state.Req.Len()), 10) case "{remote}": - return addrToRFC3986(state.IP()) + return appendAddrToRFC3986(b, state.IP()) case "{port}": - return state.Port() + return append(b, state.Port()...) case "{local}": - return addrToRFC3986(state.LocalIP()) + return appendAddrToRFC3986(b, state.LocalIP()) // Header placeholders (case-insensitive). case headerReplacer + "id}": - return strconv.Itoa(int(state.Req.Id)) + return strconv.AppendInt(b, int64(state.Req.Id), 10) case headerReplacer + "opcode}": - return strconv.Itoa(state.Req.Opcode) + return strconv.AppendInt(b, int64(state.Req.Opcode), 10) case headerReplacer + "do}": - return boolToString(state.Do()) + return strconv.AppendBool(b, state.Do()) case headerReplacer + "bufsize}": - return strconv.Itoa(state.Size()) + return strconv.AppendInt(b, int64(state.Size()), 10) // Recorded replacements. case "{rcode}": if rr == nil { - return EmptyValue + return append(b, EmptyValue...) } - rcode := dns.RcodeToString[rr.Rcode] - if rcode == "" { - rcode = strconv.Itoa(rr.Rcode) + if rcode := dns.RcodeToString[rr.Rcode]; rcode != "" { + return append(b, rcode...) 
} - return rcode + return strconv.AppendInt(b, int64(rr.Rcode), 10) case "{rsize}": if rr == nil { - return EmptyValue + return append(b, EmptyValue...) } - return strconv.Itoa(rr.Len) + return strconv.AppendInt(b, int64(rr.Len), 10) case "{duration}": if rr == nil { - return EmptyValue + return append(b, EmptyValue...) } - return strconv.FormatFloat(time.Since(rr.Start).Seconds(), 'f', -1, 64) + "s" + secs := time.Since(rr.Start).Seconds() + return append(strconv.AppendFloat(b, secs, 'f', -1, 64), 's') case headerReplacer + "rflags}": if rr != nil && rr.Msg != nil { - return flagsToString(rr.Msg.MsgHdr) - } - return EmptyValue - } - return EmptyValue -} - -// New makes a new replacer. This only needs to be called once in the setup and then call Replace for each incoming message. -// A replacer is safe for concurrent use. -func New() Replacer { - return Replacer{ - valueFunc: value, - labels: labels, - } -} - -// Replace performs a replacement of values on s and returns the string with the replaced values. -func (r Replacer) Replace(ctx context.Context, state request.Request, rr *dnstest.Recorder, s string) string { - for _, placeholder := range r.labels { - if strings.Contains(s, placeholder) { - s = strings.Replace(s, placeholder, r.valueFunc(state, rr, placeholder), -1) - } - } - - // Metadata label replacements. Scan for {/ and search for next }, replace that metadata label with - // any meta data that is available. 
- b := strings.Builder{} - for strings.Contains(s, labelReplacer) { - idxStart := strings.Index(s, labelReplacer) - endOffset := idxStart + len(labelReplacer) - idxEnd := strings.Index(s[endOffset:], "}") - if idxEnd > -1 { - label := s[idxStart+2 : endOffset+idxEnd] - - fm := metadata.ValueFunc(ctx, label) - replacement := EmptyValue - if fm != nil { - replacement = fm() - } - - b.WriteString(s[:idxStart]) - b.WriteString(replacement) - s = s[endOffset+idxEnd+1:] - } else { - break + return appendFlags(b, rr.Msg.MsgHdr) } + return append(b, EmptyValue...) + default: + return append(b, EmptyValue...) } - - b.WriteString(s) - return b.String() -} - -func boolToString(b bool) string { - if b { - return "true" - } - return "false" } -// flagsToString checks all header flags and returns those +// appendFlags checks all header flags and appends those // that are set as a string separated with commas -func flagsToString(h dns.MsgHdr) string { - flags := make([]string, 7) - i := 0 - +func appendFlags(b []byte, h dns.MsgHdr) []byte { + origLen := len(b) if h.Response { - flags[i] = "qr" - i++ + b = append(b, "qr,"...) } - if h.Authoritative { - flags[i] = "aa" - i++ + b = append(b, "aa,"...) } if h.Truncated { - flags[i] = "tc" - i++ + b = append(b, "tc,"...) } if h.RecursionDesired { - flags[i] = "rd" - i++ + b = append(b, "rd,"...) } if h.RecursionAvailable { - flags[i] = "ra" - i++ + b = append(b, "ra,"...) } if h.Zero { - flags[i] = "z" - i++ + b = append(b, "z,"...) } if h.AuthenticatedData { - flags[i] = "ad" - i++ + b = append(b, "ad,"...) } if h.CheckingDisabled { - flags[i] = "cd" - i++ + b = append(b, "cd,"...) } - return strings.Join(flags[:i], ",") + if n := len(b); n > origLen { + return b[:n-1] // trim trailing ',' + } + return b } -// addrToRFC3986 will add brackets to the address if it is an IPv6 address. 
-func addrToRFC3986(addr string) string { - if strings.Contains(addr, ":") { - return "[" + addr + "]" +// appendAddrToRFC3986 will add brackets to the address if it is an IPv6 address. +func appendAddrToRFC3986(b []byte, addr string) []byte { + if strings.IndexByte(addr, ':') != -1 { + b = append(b, '[') + b = append(b, addr...) + b = append(b, ']') + } else { + b = append(b, addr...) } - return addr + return b } +type nodeType int + const ( - headerReplacer = "{>" - labelReplacer = "{/" - // EmptyValue is the default empty value. - EmptyValue = "-" + typeLabel nodeType = iota // "{type}" + typeLiteral // "foo" + typeMetadata // "{/metadata}" ) + +// A node represents a segment of a parsed format. For example: "A {type}" +// contains two nodes: "A " (literal); and "{type}" (label). +type node struct { + value string // Literal value, label or metadata label + typ nodeType +} + +// A replacer is an ordered list of all the nodes in a format. +type replacer []node + +func parseFormat(s string) replacer { + // Assume there is a literal between each label - its cheaper to over + // allocate once than allocate twice. + rep := make(replacer, 0, strings.Count(s, "{")*2) + for { + // We find the right bracket then backtrack to find the left bracket. + // This allows us to handle formats like: "{ {foo} }". + j := strings.IndexByte(s, '}') + if j < 0 { + break + } + i := strings.LastIndexByte(s[:j], '{') + if i < 0 { + // Handle: "A } {foo}" by treating "A }" as a literal + rep = append(rep, node{ + value: s[:j+1], + typ: typeLiteral, + }) + s = s[j+1:] + continue + } + + val := s[i : j+1] + var typ nodeType + switch _, ok := labels[val]; { + case ok: + typ = typeLabel + case strings.HasPrefix(val, "{/"): + // Strip "{/}" from metadata labels + val = val[2 : len(val)-1] + typ = typeMetadata + default: + // Given: "A {X}" val is "{X}" expand it to the whole literal. + val = s[:j+1] + typ = typeLiteral + } + + // Append any leading literal. 
Given "A {type}" the literal is "A " + if i != 0 && typ != typeLiteral { + rep = append(rep, node{ + value: s[:i], + typ: typeLiteral, + }) + } + rep = append(rep, node{ + value: val, + typ: typ, + }) + s = s[j+1:] + } + if len(s) != 0 { + rep = append(rep, node{ + value: s, + typ: typeLiteral, + }) + } + return rep +} + +var replacerCache sync.Map // map[string]replacer + +func loadFormat(s string) replacer { + if v, ok := replacerCache.Load(s); ok { + return v.(replacer) + } + v, _ := replacerCache.LoadOrStore(s, parseFormat(s)) + return v.(replacer) +} + +// bufPool stores pointers to scratch buffers. +var bufPool = sync.Pool{ + New: func() interface{} { + return make([]byte, 0, 256) + }, +} + +func (r replacer) Replace(ctx context.Context, state request.Request, rr *dnstest.Recorder) string { + b := bufPool.Get().([]byte) + for _, s := range r { + switch s.typ { + case typeLabel: + b = appendValue(b, state, rr, s.value) + case typeLiteral: + b = append(b, s.value...) + case typeMetadata: + if fm := metadata.ValueFunc(ctx, s.value); fm != nil { + b = append(b, fm()...) + } else { + b = append(b, EmptyValue...) 
+ } + } + } + s := string(b) + bufPool.Put(b[:0]) + return s +} diff --git a/plugin/pkg/replacer/replacer_test.go b/plugin/pkg/replacer/replacer_test.go index 13ba396b974..3e0a5069119 100644 --- a/plugin/pkg/replacer/replacer_test.go +++ b/plugin/pkg/replacer/replacer_test.go @@ -2,6 +2,8 @@ package replacer import ( "context" + "reflect" + "strings" "testing" "github.com/coredns/coredns/plugin/metadata" @@ -12,6 +14,9 @@ import ( "github.com/miekg/dns" ) +// This is the default format used by the log package +const CommonLogFormat = `{remote}:{port} - {>id} "{type} {class} {name} {proto} {size} {>do} {>bufsize}" {rcode} {>rflags} {rsize} {duration}` + func TestReplacer(t *testing.T) { w := dnstest.NewRecorder(&test.ResponseWriter{}) r := new(dns.Msg) @@ -32,6 +37,178 @@ func TestReplacer(t *testing.T) { } } +func TestParseFormat(t *testing.T) { + type formatTest struct { + Format string + Expected replacer + } + tests := []formatTest{ + { + Format: "", + Expected: replacer{}, + }, + { + Format: "A", + Expected: replacer{ + {"A", typeLiteral}, + }, + }, + { + Format: "A {A}", + Expected: replacer{ + {"A {A}", typeLiteral}, + }, + }, + { + Format: "{{remote}}", + Expected: replacer{ + {"{", typeLiteral}, + {"{remote}", typeLabel}, + {"}", typeLiteral}, + }, + }, + { + Format: "{ A {remote} A }", + Expected: replacer{ + {"{ A ", typeLiteral}, + {"{remote}", typeLabel}, + {" A }", typeLiteral}, + }, + }, + { + Format: "{remote}}", + Expected: replacer{ + {"{remote}", typeLabel}, + {"}", typeLiteral}, + }, + }, + { + Format: "{{remote}", + Expected: replacer{ + {"{", typeLiteral}, + {"{remote}", typeLabel}, + }, + }, + { + Format: `Foo } {remote}`, + Expected: replacer{ + // we don't do any optimizations to join adjacent literals + {"Foo }", typeLiteral}, + {" ", typeLiteral}, + {"{remote}", typeLabel}, + }, + }, + { + Format: `{ Foo`, + Expected: replacer{ + {"{ Foo", typeLiteral}, + }, + }, + { + Format: `} Foo`, + Expected: replacer{ + {"}", 
typeLiteral}, + {" Foo", typeLiteral}, + }, + }, + { + Format: "A { {remote} {type} {/meta1} } B", + Expected: replacer{ + {"A { ", typeLiteral}, + {"{remote}", typeLabel}, + {" ", typeLiteral}, + {"{type}", typeLabel}, + {" ", typeLiteral}, + {"meta1", typeMetadata}, + {" }", typeLiteral}, + {" B", typeLiteral}, + }, + }, + { + Format: `LOG {remote}:{port} - {>id} "{type} {class} {name} {proto} ` + + `{size} {>do} {>bufsize}" {rcode} {>rflags} {rsize} {/meta1}-{/meta2} ` + + `{duration} END OF LINE`, + Expected: replacer{ + {"LOG ", typeLiteral}, + {"{remote}", typeLabel}, + {":", typeLiteral}, + {"{port}", typeLabel}, + {" - ", typeLiteral}, + {"{>id}", typeLabel}, + {` "`, typeLiteral}, + {"{type}", typeLabel}, + {" ", typeLiteral}, + {"{class}", typeLabel}, + {" ", typeLiteral}, + {"{name}", typeLabel}, + {" ", typeLiteral}, + {"{proto}", typeLabel}, + {" ", typeLiteral}, + {"{size}", typeLabel}, + {" ", typeLiteral}, + {"{>do}", typeLabel}, + {" ", typeLiteral}, + {"{>bufsize}", typeLabel}, + {`" `, typeLiteral}, + {"{rcode}", typeLabel}, + {" ", typeLiteral}, + {"{>rflags}", typeLabel}, + {" ", typeLiteral}, + {"{rsize}", typeLabel}, + {" ", typeLiteral}, + {"meta1", typeMetadata}, + {"-", typeLiteral}, + {"meta2", typeMetadata}, + {" ", typeLiteral}, + {"{duration}", typeLabel}, + {" END OF LINE", typeLiteral}, + }, + }, + } + for i, x := range tests { + r := parseFormat(x.Format) + if !reflect.DeepEqual(r, x.Expected) { + t.Errorf("%d: Expected:\n\t%+v\nGot:\n\t%+v", i, x.Expected, r) + } + } +} + +func TestParseFormatNodes(t *testing.T) { + // If we parse the format successfully the result of joining all the + // segments should match the original format. 
+ formats := []string{ + "", + "msg", + "{remote}", + "{remote}", + "{{remote}", + "{{remote}}", + "{{remote}} A", + CommonLogFormat, + CommonLogFormat + " FOO} {BAR}", + "A " + CommonLogFormat + " FOO} {BAR}", + "A " + CommonLogFormat + " {/meta}", + } + join := func(r replacer) string { + a := make([]string, len(r)) + for i, n := range r { + if n.typ == typeMetadata { + a[i] = "{/" + n.value + "}" + } else { + a[i] = n.value + } + } + return strings.Join(a, "") + } + for _, format := range formats { + r := parseFormat(format) + s := join(r) + if s != format { + t.Errorf("Expected format to be: '%s' got: '%s'", format, s) + } + } +} + func TestLabels(t *testing.T) { w := dnstest.NewRecorder(&test.ResponseWriter{}) r := new(dns.Msg) @@ -68,7 +245,7 @@ func TestLabels(t *testing.T) { t.Fatalf("Expect %d labels, got %d", len(expect), len(labels)) } - for _, lbl := range labels { + for lbl := range labels { repl := replacer.Replace(ctx, state, w, lbl) if lbl == "{duration}" { if repl[len(repl)-1] != 's' { @@ -98,6 +275,34 @@ func BenchmarkReplacer(b *testing.B) { } } +func BenchmarkReplacer_CommonLogFormat(b *testing.B) { + + w := dnstest.NewRecorder(&test.ResponseWriter{}) + r := new(dns.Msg) + r.SetQuestion("example.org.", dns.TypeHINFO) + r.Id = 1053 + r.AuthenticatedData = true + r.CheckingDisabled = true + r.MsgHdr.AuthenticatedData = true + w.WriteMsg(r) + state := request.Request{W: w, Req: r} + + replacer := New() + ctxt := context.TODO() + + b.ResetTimer() + b.ReportAllocs() + for i := 0; i < b.N; i++ { + replacer.Replace(ctxt, state, w, CommonLogFormat) + } +} + +func BenchmarkParseFormat(b *testing.B) { + for i := 0; i < b.N; i++ { + parseFormat(CommonLogFormat) + } +} + type testProvider map[string]metadata.Func func (tp testProvider) Metadata(ctx context.Context, state request.Request) context.Context { From eefe49dd3e4c804560a365e55665138ef474f4e1 Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Wed, 17 Jul 2019 20:40:15 +0000 Subject: [PATCH 019/324] 
plugin/route53: small doc cleanups (#3016) Adjust style to rest fo the plugins. Signed-off-by: Miek Gieben --- plugin/route53/README.md | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/plugin/route53/README.md b/plugin/route53/README.md index 8a7863081b8..d492ae913c3 100644 --- a/plugin/route53/README.md +++ b/plugin/route53/README.md @@ -15,7 +15,7 @@ The route53 plugin can be used when coredns is deployed on AWS or elsewhere. ~~~ txt route53 [ZONE:HOSTED_ZONE_ID...] { - [aws_access_key AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY] + aws_access_key [AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY] credentials PROFILE [FILENAME] fallthrough [ZONES...] } @@ -41,11 +41,12 @@ route53 [ZONE:HOSTED_ZONE_ID...] { * **FILENAME** AWS credentials filename. Defaults to `~/.aws/credentials` are used. * `fallthrough` If zone matches and no record can be generated, pass request to the next plugin. - If **[ZONES...]** is omitted, then fallthrough happens for all zones for which the plugin is + If **ZONES** is omitted, then fallthrough happens for all zones for which the plugin is authoritative. If specific zones are listed (for example `in-addr.arpa` and `ip6.arpa`), then only queries for those zones will be subject to fallthrough. -* **ZONES** zones it should be authoritative for. If empty, the zones from the configuration block +* **ZONES** zones it should be authoritative for. If empty, the zones from the configuration + block. ## Examples From 6c9a5997610c66de7795276dd285926e6a8252af Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Thu, 18 Jul 2019 14:56:59 +0000 Subject: [PATCH 020/324] plugin/file: fix setting ReloadInterval (#3017) * plugin/file: fix setting ReloadInterval The reload interval was only correctly set if there was an extra block for the file. Move this down to set up. Add test case that fails before, but now works. 
Signed-off-by: Miek Gieben * layout and use Errorf Signed-off-by: Miek Gieben --- plugin/file/setup.go | 9 ++++++--- plugin/file/setup_test.go | 33 +++++++++++++++++++++++++++++++++ 2 files changed, 39 insertions(+), 3 deletions(-) diff --git a/plugin/file/setup.go b/plugin/file/setup.go index 9be09fe8d54..162db6d657c 100644 --- a/plugin/file/setup.go +++ b/plugin/file/setup.go @@ -97,7 +97,6 @@ func fileParse(c *caddy.Controller) (Zones, error) { names = append(names, origins[i]) } - upstr := upstream.New() t := []string{} var e error @@ -128,11 +127,15 @@ func fileParse(c *caddy.Controller) (Zones, error) { if t != nil { z[origin].TransferTo = append(z[origin].TransferTo, t...) } - z[origin].ReloadInterval = reload - z[origin].Upstream = upstr } } } + + for origin := range z { + z[origin].ReloadInterval = reload + z[origin].Upstream = upstream.New() + } + if openErr != nil { if reload == 0 { // reload hasn't been set make this a fatal error diff --git a/plugin/file/setup_test.go b/plugin/file/setup_test.go index f6252759bfb..6a1d5e7906b 100644 --- a/plugin/file/setup_test.go +++ b/plugin/file/setup_test.go @@ -2,6 +2,7 @@ package file import ( "testing" + "time" "github.com/coredns/coredns/plugin/test" @@ -90,3 +91,35 @@ func TestFileParse(t *testing.T) { } } } + +func TestParseReload(t *testing.T) { + name, rm, err := test.TempFile(".", dbMiekNL) + if err != nil { + t.Fatal(err) + } + defer rm() + + tests := []struct { + input string + reload time.Duration + }{ + { + `file ` + name + ` example.org.`, + 1 * time.Minute, + }, + { + `file ` + name + ` example.org. 
{ + reload 5s + }`, + 5 * time.Second, + }, + } + + for i, test := range tests { + c := caddy.NewTestController("dns", test.input) + z, _ := fileParse(c) + if x := z.Z["example.org."].ReloadInterval; x != test.reload { + t.Errorf("Test %d expected reload to be %s, but got %s", i, test.reload, x) + } + } +} From 527772fc5f27acf1b12d7012bde6c95588d79f50 Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Thu, 18 Jul 2019 09:41:09 -0700 Subject: [PATCH 021/324] build(deps): bump gopkg.in/DataDog/dd-trace-go.v1 from 1.15.0 to 1.16.0 (#3018) Bumps [gopkg.in/DataDog/dd-trace-go.v1](https://github.com/DataDog/dd-trace-go) from 1.15.0 to 1.16.0. - [Release notes](https://github.com/DataDog/dd-trace-go/releases) - [Commits](https://github.com/DataDog/dd-trace-go/compare/v1.15.0...v1.16.0) Signed-off-by: dependabot-preview[bot] --- go.mod | 2 +- go.sum | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index 4a1a3b87d24..30af9703043 100644 --- a/go.mod +++ b/go.mod @@ -56,7 +56,7 @@ require ( golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb google.golang.org/genproto v0.0.0-20190701230453-710ae3a149df // indirect google.golang.org/grpc v1.22.0 - gopkg.in/DataDog/dd-trace-go.v1 v1.15.0 + gopkg.in/DataDog/dd-trace-go.v1 v1.16.0 gopkg.in/inf.v0 v0.9.1 // indirect k8s.io/api v0.0.0-20190313235455-40a48860b5ab k8s.io/apimachinery v0.0.0-20190313205120-d7deff9243b1 diff --git a/go.sum b/go.sum index d13a72c9aa8..9e9a8433f35 100644 --- a/go.sum +++ b/go.sum @@ -342,6 +342,8 @@ google.golang.org/grpc v1.22.0 h1:J0UbZOIrCAl+fpTOf8YLs4dJo8L/owV4LYVtAXQoPkw= google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= gopkg.in/DataDog/dd-trace-go.v1 v1.15.0 h1:2LhklnAJsRSelbnBrrE5QuRleRDkmOh2JWxOtIX6yec= gopkg.in/DataDog/dd-trace-go.v1 v1.15.0/go.mod h1:DVp8HmDh8PuTu2Z0fVVlBsyWaC++fzwVCaGWylTe3tg= +gopkg.in/DataDog/dd-trace-go.v1 
v1.16.0 h1:M+pIoj6uFByW3E8KaAaBmsLl3Sma3JzXZ9eo7ffyX5A= +gopkg.in/DataDog/dd-trace-go.v1 v1.16.0/go.mod h1:DVp8HmDh8PuTu2Z0fVVlBsyWaC++fzwVCaGWylTe3tg= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= From 1d5095cf6789fc26c7eface8b68d747d85dbf02e Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Thu, 18 Jul 2019 17:44:47 +0000 Subject: [PATCH 022/324] plugin/file: Rename do to walk and cleanup and document (#2987) * plugin/file: Rename do to walk, cleanup and document * This renames Do to Walk to be more inline with Go standards. Also make it return an error instead of a bool. Also give give walk access to rrs. Alternatively e.m could be exported, but just access the map of rrs should work as well. Another alternative would be adding a whole bunch of helper functions, but those need grab and return the data. Just having access to the rrs should be easiest for most Walks. * It adds Type and TypeForWildcard to show the different functions * *Removes* the identical RR check when inserting; this was only done for A, AAAA and MX and not finished; removed under the mantra garbage in garbage out. 
* Reuses Types to return all the types in an *tree.Elem Signed-off-by: Miek Gieben * better comments Signed-off-by: Miek Gieben --- plugin/file/delete_test.go | 65 +++++++++++++++++++++++++ plugin/file/file.go | 1 - plugin/file/lookup.go | 42 ++++++++-------- plugin/file/tree/all.go | 29 +---------- plugin/file/tree/elem.go | 99 ++++++++++++-------------------------- plugin/file/tree/tree.go | 11 ++--- plugin/file/tree/walk.go | 30 ++++++++++++ 7 files changed, 154 insertions(+), 123 deletions(-) create mode 100644 plugin/file/delete_test.go create mode 100644 plugin/file/tree/walk.go diff --git a/plugin/file/delete_test.go b/plugin/file/delete_test.go new file mode 100644 index 00000000000..26ee64e3a4a --- /dev/null +++ b/plugin/file/delete_test.go @@ -0,0 +1,65 @@ +package file + +import ( + "bytes" + "fmt" + "testing" + + "github.com/coredns/coredns/plugin/file/tree" + "github.com/coredns/coredns/plugin/test" + + "github.com/miekg/dns" +) + +/* +Create a zone with: + + apex + / + a MX + a A + +Test that: we create the proper tree and that delete +deletes the correct elements +*/ + +var tz = NewZone("example.org.", "db.example.org.") + +type treebuf struct { + *bytes.Buffer +} + +func (t *treebuf) printFunc(e *tree.Elem, rrs map[uint16][]dns.RR) error { + fmt.Fprintf(t.Buffer, "%v\n", rrs) // should be fixed order in new go versions. + return nil +} + +func TestZoneInsertAndDelete(t *testing.T) { + tz.Insert(test.SOA("example.org. IN SOA 1 2 3 4 5")) + + if x := tz.Apex.SOA.Header().Name; x != "example.org." { + t.Errorf("Failed to insert SOA, expected %s, git %s", "example.org.", x) + } + + // Insert two RRs and then remove one. + tz.Insert(test.A("a.example.org. IN A 127.0.0.1")) + tz.Insert(test.MX("a.example.org. IN MX 10 mx.example.org.")) + + tz.Delete(test.MX("a.example.org. 
IN MX 10 mx.example.org.")) + + tb := treebuf{new(bytes.Buffer)} + + tz.Walk(tb.printFunc) + if tb.String() != "map[1:[a.example.org.\t3600\tIN\tA\t127.0.0.1]]\n" { + t.Errorf("Expected 1 A record in tree, got %s", tb.String()) + } + + tz.Delete(test.A("a.example.org. IN A 127.0.0.1")) + + tb.Reset() + + tz.Walk(tb.printFunc) + if tb.String() != "" { + t.Errorf("Expected no record in tree, got %s", tb.String()) + } +} diff --git a/plugin/file/file.go b/plugin/file/file.go index bc582cfaa76..169720e64af 100644 --- a/plugin/file/file.go +++ b/plugin/file/file.go @@ -119,7 +119,6 @@ func (s *serialErr) Error() string { // If serial >= 0 it will reload the zone, if the SOA hasn't changed // it returns an error indicating nothing was read. func Parse(f io.Reader, origin, fileName string, serial int64) (*Zone, error) { - zp := dns.NewZoneParser(f, dns.Fqdn(origin), fileName) zp.SetIncludeAllowed(true) z := NewZone(origin, fileName) diff --git a/plugin/file/lookup.go b/plugin/file/lookup.go index 3a72a616332..14dfb6f7d6c 100644 --- a/plugin/file/lookup.go +++ b/plugin/file/lookup.go @@ -106,14 +106,14 @@ func (z *Zone) Lookup(ctx context.Context, state request.Request, qname string) } // If we see DNAME records, we should return those. - if dnamerrs := elem.Types(dns.TypeDNAME); dnamerrs != nil { + if dnamerrs := elem.Type(dns.TypeDNAME); dnamerrs != nil { // Only one DNAME is allowed per name. We just pick the first one to synthesize from. dname := dnamerrs[0] if cname := synthesizeCNAME(state.Name(), dname.(*dns.DNAME)); cname != nil { answer, ns, extra, rcode := z.additionalProcessing(ctx, state, elem, []dns.RR{cname}) if do { - sigs := elem.Types(dns.TypeRRSIG) + sigs := elem.Type(dns.TypeRRSIG) sigs = signatureForSubType(sigs, dns.TypeDNAME) dnamerrs = append(dnamerrs, sigs...) 
} @@ -130,7 +130,7 @@ func (z *Zone) Lookup(ctx context.Context, state request.Request, qname string) } // If we see NS records, it means the name as been delegated, and we should return the delegation. - if nsrrs := elem.Types(dns.TypeNS); nsrrs != nil { + if nsrrs := elem.Type(dns.TypeNS); nsrrs != nil { // If the query is specifically for DS and the qname matches the delegated name, we should // return the DS in the answer section and leave the rest empty, i.e. just continue the loop @@ -160,11 +160,11 @@ func (z *Zone) Lookup(ctx context.Context, state request.Request, qname string) // Found entire name. if found && shot { - if rrs := elem.Types(dns.TypeCNAME); len(rrs) > 0 && qtype != dns.TypeCNAME { + if rrs := elem.Type(dns.TypeCNAME); len(rrs) > 0 && qtype != dns.TypeCNAME { return z.additionalProcessing(ctx, state, elem, rrs) } - rrs := elem.Types(qtype, qname) + rrs := elem.Type(qtype) // NODATA if len(rrs) == 0 { @@ -181,7 +181,7 @@ func (z *Zone) Lookup(ctx context.Context, state request.Request, qname string) additional := additionalProcessing(z, rrs, do) if do { - sigs := elem.Types(dns.TypeRRSIG) + sigs := elem.Type(dns.TypeRRSIG) sigs = signatureForSubType(sigs, qtype) rrs = append(rrs, sigs...) } @@ -196,11 +196,11 @@ func (z *Zone) Lookup(ctx context.Context, state request.Request, qname string) if wildElem != nil { auth := z.ns(do) - if rrs := wildElem.Types(dns.TypeCNAME, qname); len(rrs) > 0 { + if rrs := wildElem.TypeForWildcard(dns.TypeCNAME, qname); len(rrs) > 0 { return z.additionalProcessing(ctx, state, wildElem, rrs) } - rrs := wildElem.Types(qtype, qname) + rrs := wildElem.TypeForWildcard(qtype, qname) // NODATA response. if len(rrs) == 0 { @@ -219,7 +219,7 @@ func (z *Zone) Lookup(ctx context.Context, state request.Request, qname string) auth = append(auth, nsec...) } - sigs := wildElem.Types(dns.TypeRRSIG, qname) + sigs := wildElem.TypeForWildcard(dns.TypeRRSIG, qname) sigs = signatureForSubType(sigs, qtype) rrs = append(rrs, sigs...) 
@@ -272,9 +272,9 @@ Out: // Return type tp from e and add signatures (if they exists) and do is true. func (z *Zone) typeFromElem(elem *tree.Elem, tp uint16, do bool) []dns.RR { - rrs := elem.Types(tp) + rrs := elem.Type(tp) if do { - sigs := elem.Types(dns.TypeRRSIG) + sigs := elem.Type(dns.TypeRRSIG) sigs = signatureForSubType(sigs, tp) if len(sigs) > 0 { rrs = append(rrs, sigs...) @@ -306,7 +306,7 @@ func (z *Zone) additionalProcessing(ctx context.Context, state request.Request, do := state.Do() if do { - sigs := elem.Types(dns.TypeRRSIG) + sigs := elem.Type(dns.TypeRRSIG) sigs = signatureForSubType(sigs, dns.TypeCNAME) if len(sigs) > 0 { rrs = append(rrs, sigs...) @@ -323,12 +323,12 @@ func (z *Zone) additionalProcessing(ctx context.Context, state request.Request, i := 0 Redo: - cname := elem.Types(dns.TypeCNAME) + cname := elem.Type(dns.TypeCNAME) if len(cname) > 0 { rrs = append(rrs, cname...) if do { - sigs := elem.Types(dns.TypeRRSIG) + sigs := elem.Type(dns.TypeRRSIG) sigs = signatureForSubType(sigs, dns.TypeCNAME) if len(sigs) > 0 { rrs = append(rrs, sigs...) @@ -354,7 +354,7 @@ Redo: rrs = append(rrs, targets...) if do { - sigs := elem.Types(dns.TypeRRSIG) + sigs := elem.Type(dns.TypeRRSIG) sigs = signatureForSubType(sigs, qtype) if len(sigs) > 0 { rrs = append(rrs, sigs...) @@ -416,9 +416,9 @@ func (z *Zone) searchGlue(name string, do bool) []dns.RR { // A if elem, found := z.Tree.Search(name); found { - glue = append(glue, elem.Types(dns.TypeA)...) + glue = append(glue, elem.Type(dns.TypeA)...) if do { - sigs := elem.Types(dns.TypeRRSIG) + sigs := elem.Type(dns.TypeRRSIG) sigs = signatureForSubType(sigs, dns.TypeA) glue = append(glue, sigs...) } @@ -426,9 +426,9 @@ func (z *Zone) searchGlue(name string, do bool) []dns.RR { // AAAA if elem, found := z.Tree.Search(name); found { - glue = append(glue, elem.Types(dns.TypeAAAA)...) + glue = append(glue, elem.Type(dns.TypeAAAA)...) 
if do { - sigs := elem.Types(dns.TypeRRSIG) + sigs := elem.Type(dns.TypeRRSIG) sigs = signatureForSubType(sigs, dns.TypeAAAA) glue = append(glue, sigs...) } @@ -456,9 +456,9 @@ func additionalProcessing(z *Zone, answer []dns.RR, do bool) (extra []dns.RR) { continue } - sigs := elem.Types(dns.TypeRRSIG) + sigs := elem.Type(dns.TypeRRSIG) for _, addr := range []uint16{dns.TypeA, dns.TypeAAAA} { - if a := elem.Types(addr); a != nil { + if a := elem.Type(addr); a != nil { extra = append(extra, a...) if do { sig := signatureForSubType(sigs, addr) diff --git a/plugin/file/tree/all.go b/plugin/file/tree/all.go index fd806365f38..e1fc5b392c3 100644 --- a/plugin/file/tree/all.go +++ b/plugin/file/tree/all.go @@ -1,6 +1,6 @@ package tree -// All traverses tree and returns all elements +// All traverses tree and returns all elements. func (t *Tree) All() []*Elem { if t.Root == nil { return nil @@ -19,30 +19,3 @@ func (n *Node) all(found []*Elem) []*Elem { } return found } - -// Do performs fn on all values stored in the tree. A boolean is returned indicating whether the -// Do traversal was interrupted by an Operation returning true. If fn alters stored values' sort -// relationships, future tree operation behaviors are undefined. -func (t *Tree) Do(fn func(e *Elem) bool) bool { - if t.Root == nil { - return false - } - return t.Root.do(fn) -} - -func (n *Node) do(fn func(e *Elem) bool) (done bool) { - if n.Left != nil { - done = n.Left.do(fn) - if done { - return - } - } - done = fn(n.Elem) - if done { - return - } - if n.Right != nil { - done = n.Right.do(fn) - } - return -} diff --git a/plugin/file/tree/elem.go b/plugin/file/tree/elem.go index 6317cc91237..c1909649d68 100644 --- a/plugin/file/tree/elem.go +++ b/plugin/file/tree/elem.go @@ -15,20 +15,34 @@ func newElem(rr dns.RR) *Elem { return &e } -// Types returns the RRs with type qtype from e. If qname is given (only the -// first one is used), the RR are copied and the owner is replaced with qname[0]. 
-func (e *Elem) Types(qtype uint16, qname ...string) []dns.RR { +// Types returns the types of the records in e. The returned list is not sorted. +func (e *Elem) Types() []uint16 { + t := make([]uint16, len(e.m)) + i := 0 + for ty := range e.m { + t[i] = ty + i++ + } + return t +} + +// Type returns the RRs with type qtype from e. +func (e *Elem) Type(qtype uint16) []dns.RR { return e.m[qtype] } + +// TypeForWildcard returns the RRs with type qtype from e. The ownername returned is set to qname. +func (e *Elem) TypeForWildcard(qtype uint16, qname string) []dns.RR { rrs := e.m[qtype] - if rrs != nil && len(qname) > 0 { - copied := make([]dns.RR, len(rrs)) - for i := range rrs { - copied[i] = dns.Copy(rrs[i]) - copied[i].Header().Name = qname[0] - } - return copied + if rrs == nil { + return nil + } + + copied := make([]dns.RR, len(rrs)) + for i := range rrs { + copied[i] = dns.Copy(rrs[i]) + copied[i].Header().Name = qname } - return rrs + return copied } // All returns all RRs from e, regardless of type. @@ -52,13 +66,10 @@ func (e *Elem) Name() string { return "" } -// Empty returns true is e does not contain any RRs, i.e. is an -// empty-non-terminal. -func (e *Elem) Empty() bool { - return len(e.m) == 0 -} +// Empty returns true is e does not contain any RRs, i.e. is an empty-non-terminal. +func (e *Elem) Empty() bool { return len(e.m) == 0 } -// Insert inserts rr into e. If rr is equal to existing rrs this is a noop. +// Insert inserts rr into e. If rr is equal to existing RRs, the RR will be added anyway. func (e *Elem) Insert(rr dns.RR) { t := rr.Header().Rrtype if e.m == nil { @@ -71,66 +82,20 @@ func (e *Elem) Insert(rr dns.RR) { e.m[t] = []dns.RR{rr} return } - for _, er := range rrs { - if equalRdata(er, rr) { - return - } - } rrs = append(rrs, rr) e.m[t] = rrs } -// Delete removes rr from e. When e is empty after the removal the returned bool is true. 
-func (e *Elem) Delete(rr dns.RR) (empty bool) { +// Delete removes all RRs of type rr.Header().Rrtype from e. +func (e *Elem) Delete(rr dns.RR) { if e.m == nil { - return true - } - - t := rr.Header().Rrtype - rrs, ok := e.m[t] - if !ok { return } - for i, er := range rrs { - if equalRdata(er, rr) { - rrs = removeFromSlice(rrs, i) - e.m[t] = rrs - empty = len(rrs) == 0 - if empty { - delete(e.m, t) - } - return - } - } - return + t := rr.Header().Rrtype + delete(e.m, t) } // Less is a tree helper function that calls less. func Less(a *Elem, name string) int { return less(name, a.Name()) } - -// Assuming the same type and name this will check if the rdata is equal as well. -func equalRdata(a, b dns.RR) bool { - switch x := a.(type) { - // TODO(miek): more types, i.e. all types. + tests for this. - case *dns.A: - return x.A.Equal(b.(*dns.A).A) - case *dns.AAAA: - return x.AAAA.Equal(b.(*dns.AAAA).AAAA) - case *dns.MX: - if x.Mx == b.(*dns.MX).Mx && x.Preference == b.(*dns.MX).Preference { - return true - } - } - return false -} - -// removeFromSlice removes index i from the slice. -func removeFromSlice(rrs []dns.RR, i int) []dns.RR { - if i >= len(rrs) { - return rrs - } - rrs = append(rrs[:i], rrs[i+1:]...) - return rrs -} diff --git a/plugin/file/tree/tree.go b/plugin/file/tree/tree.go index ed33c09a488..3aeeba4d5d8 100644 --- a/plugin/file/tree/tree.go +++ b/plugin/file/tree/tree.go @@ -275,7 +275,8 @@ func (n *Node) deleteMax() (root *Node, d int) { return } -// Delete removes rr from the tree, is the node turns empty, that node is deleted with DeleteNode. +// Delete removes all RRs of type rr.Header().Rrtype from e. If after the deletion of rr the node is empty the +// entire node is deleted. func (t *Tree) Delete(rr dns.RR) { if t.Root == nil { return @@ -283,15 +284,13 @@ func (t *Tree) Delete(rr dns.RR) { el, _ := t.Search(rr.Header().Name) if el == nil { - t.deleteNode(rr) return } - // Delete from this element. 
- empty := el.Delete(rr) - if empty { + el.Delete(rr) + if el.Empty() { t.deleteNode(rr) - return } + return } // DeleteNode deletes the node that matches rr according to Less(). diff --git a/plugin/file/tree/walk.go b/plugin/file/tree/walk.go new file mode 100644 index 00000000000..00accbafafc --- /dev/null +++ b/plugin/file/tree/walk.go @@ -0,0 +1,30 @@ +package tree + +import "github.com/miekg/dns" + +// Walk performs fn on all values stored in the tree. If a non-nil error is returned the +// Walk was interrupted by an fn returning that error. If fn alters stored values' sort +// relationships, future tree operation behaviors are undefined. +func (t *Tree) Walk(fn func(e *Elem, rrs map[uint16][]dns.RR) error) error { + if t.Root == nil { + return nil + } + return t.Root.walk(fn) +} + +func (n *Node) walk(fn func(e *Elem, rrs map[uint16][]dns.RR) error) error { + if n.Left != nil { + if err := n.Left.walk(fn); err != nil { + return err + } + } + if err := fn(n.Elem, n.Elem.m); err != nil { + return err + } + if n.Right != nil { + if err := n.Right.walk(fn); err != nil { + return err + } + } + return nil +} From d0c92544097b0dbaaf47d2622ddc80ea11ea3f4e Mon Sep 17 00:00:00 2001 From: Christian Muehlhaeuser Date: Fri, 19 Jul 2019 08:06:27 +0200 Subject: [PATCH 023/324] Fixed typo in 'transferred' (#3020) Just a nitpicky typo fix. --- test/secondary_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/secondary_test.go b/test/secondary_test.go index 685e23e613e..bfa1da05b2c 100644 --- a/test/secondary_test.go +++ b/test/secondary_test.go @@ -71,7 +71,7 @@ func TestSecondaryZoneTransfer(t *testing.T) { m.SetQuestion("example.org.", dns.TypeSOA) var r *dns.Msg - // This is now async; we we need to wait for it to be transfered. + // This is now async; we we need to wait for it to be transferred. 
for i := 0; i < 10; i++ { r, _ = dns.Exchange(m, udp) if len(r.Answer) == 0 { From c928dbd754243f55c19c122ccafe7709c041bf4c Mon Sep 17 00:00:00 2001 From: Christian Muehlhaeuser Date: Fri, 19 Jul 2019 08:31:29 +0200 Subject: [PATCH 024/324] Added comment why ominous assignment is required (#3021) --- plugin/etcd/msg/service_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/plugin/etcd/msg/service_test.go b/plugin/etcd/msg/service_test.go index 24bea47f632..a5038ac2cd5 100644 --- a/plugin/etcd/msg/service_test.go +++ b/plugin/etcd/msg/service_test.go @@ -128,6 +128,7 @@ func BenchmarkNewSRV(b *testing.B) { s := &Service{Host: "www,example.org", Port: 8080} for n := 0; n < b.N; n++ { srv := s.NewSRV("www.example.org.", 16) + // this assignment makes sure s.NewSRV doesn't get optimized out srv = srv } } From 031dfede905143694fb33fb30eba9a1721a77bbc Mon Sep 17 00:00:00 2001 From: Charlie Vieth Date: Fri, 19 Jul 2019 05:19:03 -0400 Subject: [PATCH 025/324] pkg/cache: fix race in Add() and Evict() (#3013) * pkg/cache: fix race in Add() and Evict() This fixes a race in Add() when the shard is at max capacity and the key being added is already stored. Previously, the shard would evict a random value - when all it needed to do was replace an existing value. There was a race in how Evict() picked which key to remove, which would cause concurrent calls to Evict() to remove the same key. Additionally, this commit removes a lot of the lock contention and a race around Add() and Evict() by changing them to immediately hold the write lock. Previously, they would check conditions with the read lock held and not re-check those conditions once the write lock was acquired (this is a race). 
* pkg/cache: code review comments * pkg/cache: simplify Add() logic --- plugin/pkg/cache/cache.go | 31 +++++-------- plugin/pkg/cache/cache_test.go | 25 +++++++++- plugin/pkg/cache/shard_test.go | 83 +++++++++++++++++++++++++++++++++- 3 files changed, 116 insertions(+), 23 deletions(-) diff --git a/plugin/pkg/cache/cache.go b/plugin/pkg/cache/cache.go index 8a5ad783e05..3a2c8ff7ffa 100644 --- a/plugin/pkg/cache/cache.go +++ b/plugin/pkg/cache/cache.go @@ -76,12 +76,15 @@ func newShard(size int) *shard { return &shard{items: make(map[uint64]interface{ // Add adds element indexed by key into the cache. Any existing element is overwritten func (s *shard) Add(key uint64, el interface{}) { - l := s.Len() - if l+1 > s.size { - s.Evict() - } - s.Lock() + if len(s.items) >= s.size { + if _, ok := s.items[key]; !ok { + for k := range s.items { + delete(s.items, k) + break + } + } + } s.items[key] = el s.Unlock() } @@ -95,24 +98,12 @@ func (s *shard) Remove(key uint64) { // Evict removes a random element from the cache. func (s *shard) Evict() { - hasKey := false - var key uint64 - - s.RLock() + s.Lock() for k := range s.items { - key = k - hasKey = true + delete(s.items, k) break } - s.RUnlock() - - if !hasKey { - // empty cache - return - } - - // If this item is gone between the RUnlock and Lock race we don't care. - s.Remove(key) + s.Unlock() } // Get looks up the element indexed under key. 
diff --git a/plugin/pkg/cache/cache_test.go b/plugin/pkg/cache/cache_test.go index 0c56bb9b331..2714967a661 100644 --- a/plugin/pkg/cache/cache_test.go +++ b/plugin/pkg/cache/cache_test.go @@ -3,12 +3,23 @@ package cache import "testing" func TestCacheAddAndGet(t *testing.T) { - c := New(4) + const N = shardSize * 4 + c := New(N) c.Add(1, 1) if _, found := c.Get(1); !found { t.Fatal("Failed to find inserted record") } + + for i := 0; i < N; i++ { + c.Add(uint64(i), 1) + } + for i := 0; i < N; i++ { + c.Add(uint64(i), 1) + if c.Len() != N { + t.Fatal("A item was unnecessarily evicted from the cache") + } + } } func TestCacheLen(t *testing.T) { @@ -30,6 +41,18 @@ func TestCacheLen(t *testing.T) { } } +func TestCacheSharding(t *testing.T) { + c := New(shardSize) + for i := 0; i < shardSize*2; i++ { + c.Add(uint64(i), 1) + } + for i, s := range c.shards { + if s.Len() == 0 { + t.Errorf("Failed to populate shard: %d", i) + } + } +} + func BenchmarkCache(b *testing.B) { b.ReportAllocs() diff --git a/plugin/pkg/cache/shard_test.go b/plugin/pkg/cache/shard_test.go index 26675cee122..a3831305db6 100644 --- a/plugin/pkg/cache/shard_test.go +++ b/plugin/pkg/cache/shard_test.go @@ -1,14 +1,40 @@ package cache -import "testing" +import ( + "sync" + "testing" +) func TestShardAddAndGet(t *testing.T) { - s := newShard(4) + s := newShard(1) s.Add(1, 1) if _, found := s.Get(1); !found { t.Fatal("Failed to find inserted record") } + + s.Add(2, 1) + if _, found := s.Get(1); found { + t.Fatal("Failed to evict record") + } + if _, found := s.Get(2); !found { + t.Fatal("Failed to find inserted record") + } +} + +func TestAddEvict(t *testing.T) { + const size = 1024 + s := newShard(size) + + for i := uint64(0); i < size; i++ { + s.Add(i, 1) + } + for i := uint64(0); i < size; i++ { + s.Add(i, 1) + if s.Len() != size { + t.Fatal("A item was unnecessarily evicted from the cache") + } + } } func TestShardLen(t *testing.T) { @@ -57,4 +83,57 @@ func TestShardLenEvict(t *testing.T) { if l := 
s.Len(); l != 4 { t.Fatalf("Shard size should %d, got %d", 4, l) } + + // Make sure we don't accidentally evict an element when + // we the key is already stored. + for i := 0; i < 4; i++ { + s.Add(5, 1) + if l := s.Len(); l != 4 { + t.Fatalf("Shard size should %d, got %d", 4, l) + } + } +} + +func TestShardEvictParallel(t *testing.T) { + s := newShard(shardSize) + for i := uint64(0); i < shardSize; i++ { + s.Add(i, struct{}{}) + } + start := make(chan struct{}) + var wg sync.WaitGroup + for i := 0; i < shardSize; i++ { + wg.Add(1) + go func() { + <-start + s.Evict() + wg.Done() + }() + } + close(start) // start evicting in parallel + wg.Wait() + if s.Len() != 0 { + t.Fatalf("Failed to evict all keys in parallel: %d", s.Len()) + } +} + +func BenchmarkShard(b *testing.B) { + s := newShard(shardSize) + b.ResetTimer() + for i := 0; i < b.N; i++ { + k := uint64(i) % shardSize * 2 + s.Add(k, 1) + s.Get(k) + } +} + +func BenchmarkShardParallel(b *testing.B) { + s := newShard(shardSize) + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for i := uint64(0); pb.Next(); i++ { + k := i % shardSize * 2 + s.Add(k, 1) + s.Get(k) + } + }) } From f7b26db9d02cd301e26e0d42d6c41473b5c23730 Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Fri, 19 Jul 2019 09:19:35 +0000 Subject: [PATCH 026/324] build(deps): bump github.com/aws/aws-sdk-go from 1.20.20 to 1.21.0 (#3022) Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.20.20 to 1.21.0. 
- [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Changelog](https://github.com/aws/aws-sdk-go/blob/master/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.20.20...v1.21.0) Signed-off-by: dependabot-preview[bot] --- go.mod | 2 +- go.sum | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index 30af9703043..28fc57c8ea6 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,7 @@ require ( cloud.google.com/go v0.41.0 // indirect github.com/Shopify/sarama v1.21.0 // indirect github.com/apache/thrift v0.12.0 // indirect - github.com/aws/aws-sdk-go v1.20.20 + github.com/aws/aws-sdk-go v1.21.0 github.com/caddyserver/caddy v1.0.1 github.com/coreos/bbolt v1.3.2 // indirect github.com/coreos/etcd v3.3.13+incompatible diff --git a/go.sum b/go.sum index 9e9a8433f35..0a713c3b6a3 100644 --- a/go.sum +++ b/go.sum @@ -21,6 +21,8 @@ github.com/aws/aws-sdk-go v1.20.17 h1:ZQ0cM9FpuufVDHlNViD8vD68IkEQL/VUOMwJPBqkaa github.com/aws/aws-sdk-go v1.20.17/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.20.20 h1:OAR/GtjMOhenkp1NNKr1N1FgIP3mQXHeGbRhvVIAQp0= github.com/aws/aws-sdk-go v1.20.20/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.21.0 h1:dRGzi4XZe5GFSJssHkRNUf/hRD/HL8bdqTYa9hpAO8c= +github.com/aws/aws-sdk-go v1.21.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= From 01e13c622e087779d5e0e1e82378333ccca9b1fb Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Sat, 20 Jul 2019 18:13:43 +0000 Subject: [PATCH 027/324] plugin/file: 
New zone should have zero records (#3025) After calling NewZone the number of records should be zero, but due to how zone.All() was implemented so empty RRs would be added. This then fails the == 0 check in xfr.go and put nil in the slice, this then subsequently panics on the Len(). Fix this making All() smarter when adding records. Added little test to enfore this. Signed-off-by: Miek Gieben --- plugin/file/xfr_test.go | 9 +++++++++ plugin/file/zone.go | 10 ++++++++-- 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/plugin/file/xfr_test.go b/plugin/file/xfr_test.go index 69ad68e6405..c02575556b9 100644 --- a/plugin/file/xfr_test.go +++ b/plugin/file/xfr_test.go @@ -3,6 +3,7 @@ package file import ( "fmt" "strings" + "testing" ) func ExampleZone_All() { @@ -32,3 +33,11 @@ func ExampleZone_All() { // xfr_test.go:15: a.miek.nl. 1800 IN A 139.162.196.78 // xfr_test.go:15: a.miek.nl. 1800 IN AAAA 2a01:7e00::f03c:91ff:fef1:6735 } + +func TestAllNewZone(t *testing.T) { + zone := NewZone("example.org.", "stdin") + records := zone.All() + if len(records) != 0 { + t.Errorf("Expected %d records in empty zone, got %d", 0, len(records)) + } +} diff --git a/plugin/file/zone.go b/plugin/file/zone.go index 186fdf8d097..3eb99ef66e5 100644 --- a/plugin/file/zone.go +++ b/plugin/file/zone.go @@ -173,15 +173,21 @@ func (z *Zone) All() []dns.RR { records = append(records, a.All()...) } + // Either the entire Apex is filled or none it, this isn't enforced here though. if len(z.Apex.SIGNS) > 0 { records = append(z.Apex.SIGNS, records...) } - records = append(z.Apex.NS, records...) + if len(z.Apex.NS) > 0 { + records = append(z.Apex.NS, records...) + } if len(z.Apex.SIGSOA) > 0 { records = append(z.Apex.SIGSOA, records...) } - return append([]dns.RR{z.Apex.SOA}, records...) + if z.Apex.SOA != nil { + return append([]dns.RR{z.Apex.SOA}, records...) 
+ } + return records } // NameFromRight returns the labels from the right, staring with the From 2153cada45e421220c7b3efd0f04a76954a19ad2 Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 22 Jul 2019 06:55:41 +0000 Subject: [PATCH 028/324] build(deps): bump github.com/aws/aws-sdk-go from 1.21.0 to 1.21.1 (#3027) Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.21.0 to 1.21.1. - [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Changelog](https://github.com/aws/aws-sdk-go/blob/master/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.21.0...v1.21.1) Signed-off-by: dependabot-preview[bot] --- go.mod | 2 +- go.sum | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index 28fc57c8ea6..6d90c3710ad 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,7 @@ require ( cloud.google.com/go v0.41.0 // indirect github.com/Shopify/sarama v1.21.0 // indirect github.com/apache/thrift v0.12.0 // indirect - github.com/aws/aws-sdk-go v1.21.0 + github.com/aws/aws-sdk-go v1.21.1 github.com/caddyserver/caddy v1.0.1 github.com/coreos/bbolt v1.3.2 // indirect github.com/coreos/etcd v3.3.13+incompatible diff --git a/go.sum b/go.sum index 0a713c3b6a3..ec6f80c986f 100644 --- a/go.sum +++ b/go.sum @@ -23,6 +23,8 @@ github.com/aws/aws-sdk-go v1.20.20 h1:OAR/GtjMOhenkp1NNKr1N1FgIP3mQXHeGbRhvVIAQp github.com/aws/aws-sdk-go v1.20.20/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.21.0 h1:dRGzi4XZe5GFSJssHkRNUf/hRD/HL8bdqTYa9hpAO8c= github.com/aws/aws-sdk-go v1.21.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.21.1 h1:IOFDnCEDybcw4V8nbKqyyjBu+vpu7hFYSfZqNuogi7I= +github.com/aws/aws-sdk-go v1.21.1/go.mod 
h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= From dae6aea2922c71ad3848c3060e0c1d22d1856ff8 Mon Sep 17 00:00:00 2001 From: Mat Lowery Date: Tue, 23 Jul 2019 00:23:16 -0600 Subject: [PATCH 029/324] Fix response_rcode_count_total metric (#3029) --- plugin/forward/README.md | 2 +- plugin/grpc/README.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/plugin/forward/README.md b/plugin/forward/README.md index 94e306ea848..92ffb665527 100644 --- a/plugin/forward/README.md +++ b/plugin/forward/README.md @@ -98,7 +98,7 @@ If monitoring is enabled (via the *prometheus* directive) then the following met * `coredns_forward_request_duration_seconds{to}` - duration per upstream interaction. * `coredns_forward_request_count_total{to}` - query count per upstream. -* `coredns_forward_response_rcode_total{to, rcode}` - count of RCODEs per upstream. +* `coredns_forward_response_rcode_count_total{to, rcode}` - count of RCODEs per upstream. * `coredns_forward_healthcheck_failure_count_total{to}` - number of failed health checks per upstream. * `coredns_forward_healthcheck_broken_count_total{}` - counter of when all upstreams are unhealthy, and we are randomly (this always uses the `random` policy) spraying to an upstream. diff --git a/plugin/grpc/README.md b/plugin/grpc/README.md index 39e05eca761..09b499bb7d3 100644 --- a/plugin/grpc/README.md +++ b/plugin/grpc/README.md @@ -64,7 +64,7 @@ If monitoring is enabled (via the *prometheus* directive) then the following met * `coredns_grpc_request_duration_seconds{to}` - duration per upstream interaction. * `coredns_grpc_request_count_total{to}` - query count per upstream. 
-* `coredns_grpc_response_rcode_total{to, rcode}` - count of RCODEs per upstream. +* `coredns_grpc_response_rcode_count_total{to, rcode}` - count of RCODEs per upstream. and we are randomly (this always uses the `random` policy) spraying to an upstream. ## Examples From 637bd3c7dcb19659e3e52cd56fe8f4e6b455ac83 Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Tue, 23 Jul 2019 07:44:47 -0700 Subject: [PATCH 030/324] build(deps): bump github.com/aws/aws-sdk-go from 1.21.1 to 1.21.2 (#3032) Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.21.1 to 1.21.2. - [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Changelog](https://github.com/aws/aws-sdk-go/blob/master/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.21.1...v1.21.2) Signed-off-by: dependabot-preview[bot] --- go.mod | 2 +- go.sum | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index 6d90c3710ad..2ced9aec7e1 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,7 @@ require ( cloud.google.com/go v0.41.0 // indirect github.com/Shopify/sarama v1.21.0 // indirect github.com/apache/thrift v0.12.0 // indirect - github.com/aws/aws-sdk-go v1.21.1 + github.com/aws/aws-sdk-go v1.21.2 github.com/caddyserver/caddy v1.0.1 github.com/coreos/bbolt v1.3.2 // indirect github.com/coreos/etcd v3.3.13+incompatible diff --git a/go.sum b/go.sum index ec6f80c986f..5926f168b2f 100644 --- a/go.sum +++ b/go.sum @@ -25,6 +25,8 @@ github.com/aws/aws-sdk-go v1.21.0 h1:dRGzi4XZe5GFSJssHkRNUf/hRD/HL8bdqTYa9hpAO8c github.com/aws/aws-sdk-go v1.21.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.21.1 h1:IOFDnCEDybcw4V8nbKqyyjBu+vpu7hFYSfZqNuogi7I= github.com/aws/aws-sdk-go v1.21.1/go.mod 
h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.21.2 h1:CqbWrQzi7s8J2F0TRRdLvTr0+bt5Zxo2IDoFNGsAiUg= +github.com/aws/aws-sdk-go v1.21.2/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= From eba020e6a1176c0d8fed639e936ddc2774f295dd Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Tue, 23 Jul 2019 18:32:44 +0000 Subject: [PATCH 031/324] plugin/file: simplify locking (#3024) * plugin/file: simplify locking Simplify the locking, remove the reloadMu and just piggyback on the other lock for accessing content, which assumes things can be move underneath. Copy the Apex and Zone to new vars to make sure the pointer isn't updated from under us. The releadMu isn't need at all, the time.Ticker firing while we're reading means we will just miss that tick and get it on the next go. Add rrutil subpackage and put some more generic functions in there, that are now used from file and the tree package. This removes some duplication. Rename additionalProcessing that didn't actually do that to externalLookup, because that's what being done at some point. 
Signed-off-by: Miek Gieben * Update plugin/file/lookup.go Co-Authored-By: Michael Grosser --- plugin/file/lookup.go | 203 ++++++++++++------------------------- plugin/file/reload.go | 13 +-- plugin/file/rrutil/util.go | 29 ++++++ plugin/file/secondary.go | 4 +- plugin/file/tree/glue.go | 44 ++++++++ plugin/file/zone.go | 37 ++++--- 6 files changed, 161 insertions(+), 169 deletions(-) create mode 100644 plugin/file/rrutil/util.go create mode 100644 plugin/file/tree/glue.go diff --git a/plugin/file/lookup.go b/plugin/file/lookup.go index 14dfb6f7d6c..9c2c1959ba2 100644 --- a/plugin/file/lookup.go +++ b/plugin/file/lookup.go @@ -3,6 +3,7 @@ package file import ( "context" + "github.com/coredns/coredns/plugin/file/rrutil" "github.com/coredns/coredns/plugin/file/tree" "github.com/coredns/coredns/request" @@ -32,31 +33,23 @@ func (z *Zone) Lookup(ctx context.Context, state request.Request, qname string) qtype := state.QType() do := state.Do() - if 0 < z.ReloadInterval { - z.reloadMu.RLock() - } - defer func() { - if 0 < z.ReloadInterval { - z.reloadMu.RUnlock() - } - }() - // If z is a secondary zone we might not have transferred it, meaning we have // all zone context setup, except the actual record. This means (for one thing) the apex // is empty and we don't have a SOA record. 
- z.apexMu.RLock() - soa := z.Apex.SOA - z.apexMu.RUnlock() - if soa == nil { + z.RLock() + ap := z.Apex + tr := z.Tree + z.RUnlock() + if ap.SOA == nil { return nil, nil, nil, ServerFailure } if qtype == dns.TypeSOA { - return z.soa(do), z.ns(do), nil, Success + return ap.soa(do), ap.ns(do), nil, Success } if qtype == dns.TypeNS && qname == z.origin { - nsrrs := z.ns(do) - glue := z.Glue(nsrrs, do) + nsrrs := ap.ns(do) + glue := tr.Glue(nsrrs, do) // technically this isn't glue return nsrrs, nil, glue, Success } @@ -87,14 +80,14 @@ func (z *Zone) Lookup(ctx context.Context, state request.Request, qname string) break } - elem, found = z.Tree.Search(parts) + elem, found = tr.Search(parts) if !found { // Apex will always be found, when we are here we can search for a wildcard // and save the result of that search. So when nothing match, but we have a // wildcard we should expand the wildcard. wildcard := replaceWithAsteriskLabel(parts) - if wild, found := z.Tree.Search(wildcard); found { + if wild, found := tr.Search(wildcard); found { wildElem = wild } @@ -110,11 +103,11 @@ func (z *Zone) Lookup(ctx context.Context, state request.Request, qname string) // Only one DNAME is allowed per name. We just pick the first one to synthesize from. dname := dnamerrs[0] if cname := synthesizeCNAME(state.Name(), dname.(*dns.DNAME)); cname != nil { - answer, ns, extra, rcode := z.additionalProcessing(ctx, state, elem, []dns.RR{cname}) + answer, ns, extra, rcode := z.externalLookup(ctx, state, elem, []dns.RR{cname}) if do { sigs := elem.Type(dns.TypeRRSIG) - sigs = signatureForSubType(sigs, dns.TypeDNAME) + sigs = rrutil.SubTypeSignature(sigs, dns.TypeDNAME) dnamerrs = append(dnamerrs, sigs...) 
} @@ -140,9 +133,9 @@ func (z *Zone) Lookup(ctx context.Context, state request.Request, qname string) continue } - glue := z.Glue(nsrrs, do) + glue := tr.Glue(nsrrs, do) if do { - dss := z.typeFromElem(elem, dns.TypeDS, do) + dss := typeFromElem(elem, dns.TypeDS, do) nsrrs = append(nsrrs, dss...) } @@ -161,16 +154,16 @@ func (z *Zone) Lookup(ctx context.Context, state request.Request, qname string) if found && shot { if rrs := elem.Type(dns.TypeCNAME); len(rrs) > 0 && qtype != dns.TypeCNAME { - return z.additionalProcessing(ctx, state, elem, rrs) + return z.externalLookup(ctx, state, elem, rrs) } rrs := elem.Type(qtype) // NODATA if len(rrs) == 0 { - ret := z.soa(do) + ret := ap.soa(do) if do { - nsec := z.typeFromElem(elem, dns.TypeNSEC, do) + nsec := typeFromElem(elem, dns.TypeNSEC, do) ret = append(ret, nsec...) } return nil, ret, nil, NoData @@ -178,15 +171,15 @@ func (z *Zone) Lookup(ctx context.Context, state request.Request, qname string) // Additional section processing for MX, SRV. Check response and see if any of the names are in baliwick - // if so add IP addresses to the additional section. - additional := additionalProcessing(z, rrs, do) + additional := z.additionalProcessing(rrs, do) if do { sigs := elem.Type(dns.TypeRRSIG) - sigs = signatureForSubType(sigs, qtype) + sigs = rrutil.SubTypeSignature(sigs, qtype) rrs = append(rrs, sigs...) } - return rrs, z.ns(do), additional, Success + return rrs, ap.ns(do), additional, Success } @@ -194,19 +187,19 @@ func (z *Zone) Lookup(ctx context.Context, state request.Request, qname string) // Found wildcard. if wildElem != nil { - auth := z.ns(do) + auth := ap.ns(do) if rrs := wildElem.TypeForWildcard(dns.TypeCNAME, qname); len(rrs) > 0 { - return z.additionalProcessing(ctx, state, wildElem, rrs) + return z.externalLookup(ctx, state, wildElem, rrs) } rrs := wildElem.TypeForWildcard(qtype, qname) // NODATA response. 
if len(rrs) == 0 { - ret := z.soa(do) + ret := ap.soa(do) if do { - nsec := z.typeFromElem(wildElem, dns.TypeNSEC, do) + nsec := typeFromElem(wildElem, dns.TypeNSEC, do) ret = append(ret, nsec...) } return nil, ret, nil, Success @@ -214,13 +207,13 @@ func (z *Zone) Lookup(ctx context.Context, state request.Request, qname string) if do { // An NSEC is needed to say no longer name exists under this wildcard. - if deny, found := z.Tree.Prev(qname); found { - nsec := z.typeFromElem(deny, dns.TypeNSEC, do) + if deny, found := tr.Prev(qname); found { + nsec := typeFromElem(deny, dns.TypeNSEC, do) auth = append(auth, nsec...) } sigs := wildElem.TypeForWildcard(dns.TypeRRSIG, qname) - sigs = signatureForSubType(sigs, qtype) + sigs = rrutil.SubTypeSignature(sigs, qtype) rrs = append(rrs, sigs...) } @@ -231,19 +224,19 @@ func (z *Zone) Lookup(ctx context.Context, state request.Request, qname string) // Hacky way to get around empty-non-terminals. If a longer name does exist, but this qname, does not, it // must be an empty-non-terminal. If so, we do the proper NXDOMAIN handling, but set the rcode to be success. - if x, found := z.Tree.Next(qname); found { + if x, found := tr.Next(qname); found { if dns.IsSubDomain(qname, x.Name()) { rcode = Success } } - ret := z.soa(do) + ret := ap.soa(do) if do { - deny, found := z.Tree.Prev(qname) + deny, found := tr.Prev(qname) if !found { goto Out } - nsec := z.typeFromElem(deny, dns.TypeNSEC, do) + nsec := typeFromElem(deny, dns.TypeNSEC, do) ret = append(ret, nsec...) if rcode != NameError { @@ -256,10 +249,10 @@ func (z *Zone) Lookup(ctx context.Context, state request.Request, qname string) if found { // wildcard denial wildcard := "*." 
+ ce.Name() - if ss, found := z.Tree.Prev(wildcard); found { + if ss, found := tr.Prev(wildcard); found { // Only add this nsec if it is different than the one already added if ss.Name() != deny.Name() { - nsec := z.typeFromElem(ss, dns.TypeNSEC, do) + nsec := typeFromElem(ss, dns.TypeNSEC, do) ret = append(ret, nsec...) } } @@ -270,54 +263,50 @@ Out: return nil, ret, nil, rcode } -// Return type tp from e and add signatures (if they exists) and do is true. -func (z *Zone) typeFromElem(elem *tree.Elem, tp uint16, do bool) []dns.RR { +// typeFromElem returns the type tp from e and adds signatures (if they exist) and do is true. +func typeFromElem(elem *tree.Elem, tp uint16, do bool) []dns.RR { rrs := elem.Type(tp) if do { sigs := elem.Type(dns.TypeRRSIG) - sigs = signatureForSubType(sigs, tp) - if len(sigs) > 0 { - rrs = append(rrs, sigs...) - } + sigs = rrutil.SubTypeSignature(sigs, tp) + rrs = append(rrs, sigs...) } return rrs } -func (z *Zone) soa(do bool) []dns.RR { +func (a Apex) soa(do bool) []dns.RR { if do { - ret := append([]dns.RR{z.Apex.SOA}, z.Apex.SIGSOA...) + ret := append([]dns.RR{a.SOA}, a.SIGSOA...) return ret } - return []dns.RR{z.Apex.SOA} + return []dns.RR{a.SOA} } -func (z *Zone) ns(do bool) []dns.RR { +func (a Apex) ns(do bool) []dns.RR { if do { - ret := append(z.Apex.NS, z.Apex.SIGNS...) + ret := append(a.NS, a.SIGNS...) return ret } - return z.Apex.NS + return a.NS } -// aditionalProcessing adds signatures and tries to resolve CNAMEs that point to external names. -func (z *Zone) additionalProcessing(ctx context.Context, state request.Request, elem *tree.Elem, rrs []dns.RR) ([]dns.RR, []dns.RR, []dns.RR, Result) { +// externalLookup adds signatures and tries to resolve CNAMEs that point to external names. 
+func (z *Zone) externalLookup(ctx context.Context, state request.Request, elem *tree.Elem, rrs []dns.RR) ([]dns.RR, []dns.RR, []dns.RR, Result) { qtype := state.QType() do := state.Do() if do { sigs := elem.Type(dns.TypeRRSIG) - sigs = signatureForSubType(sigs, dns.TypeCNAME) - if len(sigs) > 0 { - rrs = append(rrs, sigs...) - } + sigs = rrutil.SubTypeSignature(sigs, dns.TypeCNAME) + rrs = append(rrs, sigs...) } targetName := rrs[0].(*dns.CNAME).Target elem, _ = z.Tree.Search(targetName) if elem == nil { - rrs = append(rrs, z.externalLookup(ctx, state, targetName, qtype)...) - return rrs, z.ns(do), nil, Success + rrs = append(rrs, z.doLookup(ctx, state, targetName, qtype)...) + return rrs, z.Apex.ns(do), nil, Success } i := 0 @@ -329,53 +318,39 @@ Redo: if do { sigs := elem.Type(dns.TypeRRSIG) - sigs = signatureForSubType(sigs, dns.TypeCNAME) - if len(sigs) > 0 { - rrs = append(rrs, sigs...) - } + sigs = rrutil.SubTypeSignature(sigs, dns.TypeCNAME) + rrs = append(rrs, sigs...) } targetName := cname[0].(*dns.CNAME).Target elem, _ = z.Tree.Search(targetName) if elem == nil { - rrs = append(rrs, z.externalLookup(ctx, state, targetName, qtype)...) - return rrs, z.ns(do), nil, Success + rrs = append(rrs, z.doLookup(ctx, state, targetName, qtype)...) + return rrs, z.Apex.ns(do), nil, Success } i++ - if i > maxChain { - return rrs, z.ns(do), nil, Success + if i > 8 { + return rrs, z.Apex.ns(do), nil, Success } goto Redo } - targets := cnameForType(elem.All(), qtype) + targets := rrutil.CNAMEForType(elem.All(), qtype) if len(targets) > 0 { rrs = append(rrs, targets...) if do { sigs := elem.Type(dns.TypeRRSIG) - sigs = signatureForSubType(sigs, qtype) - if len(sigs) > 0 { - rrs = append(rrs, sigs...) - } + sigs = rrutil.SubTypeSignature(sigs, qtype) + rrs = append(rrs, sigs...) 
} } - return rrs, z.ns(do), nil, Success -} - -func cnameForType(targets []dns.RR, origQtype uint16) []dns.RR { - ret := []dns.RR{} - for _, target := range targets { - if target.Header().Rrtype == origQtype { - ret = append(ret, target) - } - } - return ret + return rrs, z.Apex.ns(do), nil, Success } -func (z *Zone) externalLookup(ctx context.Context, state request.Request, target string, qtype uint16) []dns.RR { +func (z *Zone) doLookup(ctx context.Context, state request.Request, target string, qtype uint16) []dns.RR { m, e := z.Upstream.Lookup(ctx, state, target, qtype) if e != nil { return nil @@ -386,59 +361,9 @@ func (z *Zone) externalLookup(ctx context.Context, state request.Request, target return m.Answer } -// signatureForSubType range through the signature and return the correct ones for the subtype. -func signatureForSubType(rrs []dns.RR, subtype uint16) []dns.RR { - sigs := []dns.RR{} - for _, sig := range rrs { - if s, ok := sig.(*dns.RRSIG); ok { - if s.TypeCovered == subtype { - sigs = append(sigs, s) - } - } - } - return sigs -} - -// Glue returns any potential glue records for nsrrs. -func (z *Zone) Glue(nsrrs []dns.RR, do bool) []dns.RR { - glue := []dns.RR{} - for _, rr := range nsrrs { - if ns, ok := rr.(*dns.NS); ok && dns.IsSubDomain(ns.Header().Name, ns.Ns) { - glue = append(glue, z.searchGlue(ns.Ns, do)...) - } - } - return glue -} - -// searchGlue looks up A and AAAA for name. -func (z *Zone) searchGlue(name string, do bool) []dns.RR { - glue := []dns.RR{} - - // A - if elem, found := z.Tree.Search(name); found { - glue = append(glue, elem.Type(dns.TypeA)...) - if do { - sigs := elem.Type(dns.TypeRRSIG) - sigs = signatureForSubType(sigs, dns.TypeA) - glue = append(glue, sigs...) - } - } - - // AAAA - if elem, found := z.Tree.Search(name); found { - glue = append(glue, elem.Type(dns.TypeAAAA)...) - if do { - sigs := elem.Type(dns.TypeRRSIG) - sigs = signatureForSubType(sigs, dns.TypeAAAA) - glue = append(glue, sigs...) 
- } - } - return glue -} - // additionalProcessing checks the current answer section and retrieves A or AAAA records // (and possible SIGs) to need to be put in the additional section. -func additionalProcessing(z *Zone, answer []dns.RR, do bool) (extra []dns.RR) { +func (z *Zone) additionalProcessing(answer []dns.RR, do bool) (extra []dns.RR) { for _, rr := range answer { name := "" switch x := rr.(type) { @@ -461,7 +386,7 @@ func additionalProcessing(z *Zone, answer []dns.RR, do bool) (extra []dns.RR) { if a := elem.Type(addr); a != nil { extra = append(extra, a...) if do { - sig := signatureForSubType(sigs, addr) + sig := rrutil.SubTypeSignature(sigs, addr) extra = append(extra, sig...) } } @@ -470,5 +395,3 @@ func additionalProcessing(z *Zone, answer []dns.RR, do bool) (extra []dns.RR) { return extra } - -const maxChain = 8 diff --git a/plugin/file/reload.go b/plugin/file/reload.go index ce5c81335a3..eb762ac5279 100644 --- a/plugin/file/reload.go +++ b/plugin/file/reload.go @@ -13,10 +13,8 @@ func (z *Zone) Reload() error { tick := time.NewTicker(z.ReloadInterval) go func() { - for { select { - case <-tick.C: zFile := z.File() reader, err := os.Open(zFile) @@ -35,10 +33,10 @@ func (z *Zone) Reload() error { } // copy elements we need - z.reloadMu.Lock() + z.Lock() z.Apex = zone.Apex z.Tree = zone.Tree - z.reloadMu.Unlock() + z.Unlock() log.Infof("Successfully reloaded zone %q in %q with serial %d", z.origin, zFile, z.Apex.SOA.Serial) z.Notify() @@ -52,11 +50,10 @@ func (z *Zone) Reload() error { return nil } -// SOASerialIfDefined returns the SOA's serial if the zone has a SOA record in the Apex, or -// -1 otherwise. +// SOASerialIfDefined returns the SOA's serial if the zone has a SOA record in the Apex, or -1 otherwise. 
func (z *Zone) SOASerialIfDefined() int64 { - z.reloadMu.Lock() - defer z.reloadMu.Unlock() + z.RLock() + defer z.RUnlock() if z.Apex.SOA != nil { return int64(z.Apex.SOA.Serial) } diff --git a/plugin/file/rrutil/util.go b/plugin/file/rrutil/util.go new file mode 100644 index 00000000000..63e447196a6 --- /dev/null +++ b/plugin/file/rrutil/util.go @@ -0,0 +1,29 @@ +// Package rrutil provides function to find certain RRs in slices. +package rrutil + +import "github.com/miekg/dns" + +// SubTypeSignature returns the RRSIG for the subtype. +func SubTypeSignature(rrs []dns.RR, subtype uint16) []dns.RR { + sigs := []dns.RR{} + // there may be multiple keys that have signed this subtype + for _, sig := range rrs { + if s, ok := sig.(*dns.RRSIG); ok { + if s.TypeCovered == subtype { + sigs = append(sigs, s) + } + } + } + return sigs +} + +// CNAMEForType returns the RR that have the qtype from targets. +func CNAMEForType(rrs []dns.RR, qtype uint16) []dns.RR { + ret := []dns.RR{} + for _, target := range rrs { + if target.Header().Rrtype == qtype { + ret = append(ret, target) + } + } + return ret +} diff --git a/plugin/file/secondary.go b/plugin/file/secondary.go index ed94daad6f5..408ee887a32 100644 --- a/plugin/file/secondary.go +++ b/plugin/file/secondary.go @@ -51,11 +51,11 @@ Transfer: return Err } - z.apexMu.Lock() + z.Lock() z.Tree = z1.Tree z.Apex = z1.Apex *z.Expired = false - z.apexMu.Unlock() + z.Unlock() log.Infof("Transferred: %s from %s", z.origin, tr) return nil } diff --git a/plugin/file/tree/glue.go b/plugin/file/tree/glue.go new file mode 100644 index 00000000000..937ae548202 --- /dev/null +++ b/plugin/file/tree/glue.go @@ -0,0 +1,44 @@ +package tree + +import ( + "github.com/coredns/coredns/plugin/file/rrutil" + + "github.com/miekg/dns" +) + +// Glue returns any potential glue records for nsrrs. 
+func (t *Tree) Glue(nsrrs []dns.RR, do bool) []dns.RR { + glue := []dns.RR{} + for _, rr := range nsrrs { + if ns, ok := rr.(*dns.NS); ok && dns.IsSubDomain(ns.Header().Name, ns.Ns) { + glue = append(glue, t.searchGlue(ns.Ns, do)...) + } + } + return glue +} + +// searchGlue looks up A and AAAA for name. +func (t *Tree) searchGlue(name string, do bool) []dns.RR { + glue := []dns.RR{} + + // A + if elem, found := t.Search(name); found { + glue = append(glue, elem.Type(dns.TypeA)...) + if do { + sigs := elem.Type(dns.TypeRRSIG) + sigs = rrutil.SubTypeSignature(sigs, dns.TypeA) + glue = append(glue, sigs...) + } + } + + // AAAA + if elem, found := t.Search(name); found { + glue = append(glue, elem.Type(dns.TypeAAAA)...) + if do { + sigs := elem.Type(dns.TypeRRSIG) + sigs = rrutil.SubTypeSignature(sigs, dns.TypeAAAA) + glue = append(glue, sigs...) + } + } + return glue +} diff --git a/plugin/file/zone.go b/plugin/file/zone.go index 3eb99ef66e5..1fcfc5d332b 100644 --- a/plugin/file/zone.go +++ b/plugin/file/zone.go @@ -15,14 +15,15 @@ import ( "github.com/miekg/dns" ) -// Zone defines a structure that contains all data related to a DNS zone. +// Zone is a structure that contains all data related to a DNS zone. type Zone struct { origin string origLen int file string *tree.Tree Apex - apexMu sync.RWMutex + + sync.RWMutex TransferTo []string StartupOnce sync.Once @@ -30,9 +31,9 @@ type Zone struct { Expired *bool ReloadInterval time.Duration - reloadMu sync.RWMutex reloadShutdown chan bool - Upstream *upstream.Upstream // Upstream for looking up external names during the resolution process. + + Upstream *upstream.Upstream // Upstream for looking up external names during the resolution process. } // Apex contains the apex records of a zone: SOA, NS and their potential signatures. @@ -122,21 +123,18 @@ func (z *Zone) Insert(r dns.RR) error { return nil } -// Delete deletes r from z. 
-func (z *Zone) Delete(r dns.RR) { z.Tree.Delete(r) } - -// File retrieves the file path in a safe way +// File retrieves the file path in a safe way. func (z *Zone) File() string { - z.reloadMu.Lock() - defer z.reloadMu.Unlock() + z.RLock() + defer z.RUnlock() return z.file } -// SetFile updates the file path in a safe way +// SetFile updates the file path in a safe way. func (z *Zone) SetFile(path string) { - z.reloadMu.Lock() + z.Lock() z.file = path - z.reloadMu.Unlock() + z.Unlock() } // TransferAllowed checks if incoming request for transferring the zone is allowed according to the ACLs. @@ -162,18 +160,16 @@ func (z *Zone) TransferAllowed(state request.Request) bool { // All returns all records from the zone, the first record will be the SOA record, // otionally followed by all RRSIG(SOA)s. func (z *Zone) All() []dns.RR { - if z.ReloadInterval > 0 { - z.reloadMu.RLock() - defer z.reloadMu.RUnlock() - } - records := []dns.RR{} + z.RLock() allNodes := z.Tree.All() + z.RUnlock() + for _, a := range allNodes { records = append(records, a.All()...) } - // Either the entire Apex is filled or none it, this isn't enforced here though. + z.RLock() if len(z.Apex.SIGNS) > 0 { records = append(z.Apex.SIGNS, records...) } @@ -184,9 +180,12 @@ func (z *Zone) All() []dns.RR { if len(z.Apex.SIGSOA) > 0 { records = append(z.Apex.SIGSOA, records...) } + if z.Apex.SOA != nil { + z.RUnlock() return append([]dns.RR{z.Apex.SOA}, records...) 
} + z.RUnlock() return records } From 048987fca5b31f5e55c38ecdc26a115c4e6e4774 Mon Sep 17 00:00:00 2001 From: Erfan Besharat Date: Wed, 24 Jul 2019 22:09:07 +0430 Subject: [PATCH 032/324] plugin/file: Add SOA serial to axfr log (#3042) --- plugin/file/xfr.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugin/file/xfr.go b/plugin/file/xfr.go index 08f71030aa6..b2dbd145897 100644 --- a/plugin/file/xfr.go +++ b/plugin/file/xfr.go @@ -42,7 +42,7 @@ func (x Xfr) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (in j, l := 0, 0 records = append(records, records[0]) // add closing SOA to the end - log.Infof("Outgoing transfer of %d records of zone %s to %s started", len(records), x.origin, state.IP()) + log.Infof("Outgoing transfer of %d records of zone %s to %s started with %d SOA serial", len(records), x.origin, state.IP(), x.SOASerialIfDefined()) for i, r := range records { l += dns.Len(r) if l > transferLength { From 2a41b9a93b69483298fd5933a095c40eea99529f Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Thu, 25 Jul 2019 13:42:02 +0000 Subject: [PATCH 033/324] Add 1.6.0 release notes (#3044) * Add 1.6.0 release notes Preliminary notes for the 1.6.0 release Signed-off-by: Miek Gieben * Update notes/coredns-1.6.0.md Co-Authored-By: Michael Grosser * Update notes/coredns-1.6.0.md Co-Authored-By: Michael Grosser --- notes/coredns-1.6.0.md | 48 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) create mode 100644 notes/coredns-1.6.0.md diff --git a/notes/coredns-1.6.0.md b/notes/coredns-1.6.0.md new file mode 100644 index 00000000000..28b1070028f --- /dev/null +++ b/notes/coredns-1.6.0.md @@ -0,0 +1,48 @@ ++++ +title = "CoreDNS-1.6.0 Release" +description = "CoreDNS-1.6.0 Release Notes." 
+tags = ["Release", "1.6.0", "Notes"] +release = "1.6.0" +date = 2019-07-03T07:35:47+01:00 +author = "coredns" ++++ + +The CoreDNS team has released +[CoreDNS-1.6.0](https://github.com/coredns/coredns/releases/tag/v1.6.0). + +The `-cpu` flag is removed from this version. + +This release sports changes in the *file* plugin. A speed up in the *log* plugin and fixes in the +*cache* plugin. + +Upcoming deprecation: the kubernetes *federation* plugin will be moved to +github.com/coredns/federation. This is likely to happen in CoreDNS 1.7.0. + +# Plugins + +* The [*file*](/plugins/file) a lot of bug fixes and it loads lazily on start, i.e. if the zonefile + does not exist, it keeps trying with every reload period. +* The [*cache*](/plugins/cache) fixes a race. +* Multiple fixes in the [*route53*](/plugins/route53). +* And the [*kubernetes*](/plugins/kubernetes) removes the `resyncperiod` option. + +## Brought to You By + +* Wonderful people + +## Noteworthy Changes + +plugin/file: Simplify locking (https://github.com/coredns/coredns/pull/3024) +plugin/file: New zone should have zero records (https://github.com/coredns/coredns/pull/3025) +plugin/file: Rename do to walk and cleanup and document (https://github.com/coredns/coredns/pull/2987) +plugin/file: Fix setting ReloadInterval (https://github.com/coredns/coredns/pull/3017) +plugin/file: Make non-existent file non-fatal (https://github.com/coredns/coredns/pull/2955) +plugin/metrics: Fix response_rcode_count_total metric (https://github.com/coredns/coredns/pull/3029) +pkg/cache: Fix race in Add() and Evict() (https://github.com/coredns/coredns/pull/3013) +plugin/route53: Fix IAM credential file (https://github.com/coredns/coredns/pull/2983) +plugin/route53: Fix multiple credentials in route53 (https://github.com/coredns/coredns/pull/2859) +pkg/replacer: Evaluate format once and improve perf by ~3x 
(https://github.com/coredns/coredns/pull/3002) +plugin/log: Fix log plugin benchmark and slightly improve performance (https://github.com/coredns/coredns/pull/3004) +core: Scrub: TC bit is always set (https://github.com/coredns/coredns/pull/3001) +plugin/rewrite: Fix domain length validation (https://github.com/coredns/coredns/pull/2995) +plugin/kubernetes: Remove resyncperiod (https://github.com/coredns/coredns/pull/2923) From 89fa9bc61e1347c26ebfa87dc789673d049294f9 Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Thu, 25 Jul 2019 18:53:07 +0000 Subject: [PATCH 034/324] plugin/host: don't append the names when reparsing hosts file (#3045) The host plugin kept on adding entries instead of overwriting. Split the inline cache off from the /etc/hosts file cache and clear /etc/hosts file cache and re-parsing. A bunch of other cleanup as well. Use functions defined in the plugin package, don't re-parse strings if you don't have to and use To4() to check the family for IP addresses. Fix all test cases a carried entries are always fqdn-ed. Various smaller cleanup in unnessacry constants. 
Fixes: #3014 Signed-off-by: Miek Gieben --- go.mod | 1 + go.sum | 2 + plugin/hosts/README.md | 11 ++- plugin/hosts/hosts.go | 30 +++---- plugin/hosts/hosts_test.go | 10 +-- plugin/hosts/hostsfile.go | 157 ++++++++++++++------------------- plugin/hosts/hostsfile_test.go | 74 ++++++++-------- plugin/hosts/setup.go | 6 +- plugin/hosts/setup_test.go | 27 +++--- 9 files changed, 147 insertions(+), 171 deletions(-) diff --git a/go.mod b/go.mod index 2ced9aec7e1..76118fe77a8 100644 --- a/go.mod +++ b/go.mod @@ -54,6 +54,7 @@ require ( golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 // indirect golang.org/x/net v0.0.0-20190628185345-da137c7871d7 // indirect golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb + golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 // indirect google.golang.org/genproto v0.0.0-20190701230453-710ae3a149df // indirect google.golang.org/grpc v1.22.0 gopkg.in/DataDog/dd-trace-go.v1 v1.16.0 diff --git a/go.sum b/go.sum index 5926f168b2f..203cbce4b2c 100644 --- a/go.sum +++ b/go.sum @@ -325,6 +325,8 @@ golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190624190245-7f2218787638/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= diff --git a/plugin/hosts/README.md b/plugin/hosts/README.md index 
d42714828fd..60135e71ab3 100644 --- a/plugin/hosts/README.md +++ b/plugin/hosts/README.md @@ -11,14 +11,16 @@ file that exists on disk. It checks the file for changes and updates the zones a plugin only supports A, AAAA, and PTR records. The hosts plugin can be used with readily available hosts files that block access to advertising servers. -The plugin reloads the content of the hosts file every 5 seconds. Upon reload, CoreDNS will use the new definitions. -Should the file be deleted, any inlined content will continue to be served. When the file is restored, it will then again be used. +The plugin reloads the content of the hosts file every 5 seconds. Upon reload, CoreDNS will use the +new definitions. Should the file be deleted, any inlined content will continue to be served. When +the file is restored, it will then again be used. This plugin can only be used once per Server Block. ## The hosts file -Commonly the entries are of the form `IP_address canonical_hostname [aliases...]` as explained by the hosts(5) man page. +Commonly the entries are of the form `IP_address canonical_hostname [aliases...]` as explained by +the hosts(5) man page. Examples: @@ -34,7 +36,8 @@ fdfc:a744:27b5:3b0e::1 example.com example ### PTR records -PTR records for reverse lookups are generated automatically by CoreDNS (based on the hosts file entries) and cannot be created manually. +PTR records for reverse lookups are generated automatically by CoreDNS (based on the hosts file +entries) and cannot be created manually. 
## Syntax diff --git a/plugin/hosts/hosts.go b/plugin/hosts/hosts.go index 8650053c03f..54b090b4d3c 100644 --- a/plugin/hosts/hosts.go +++ b/plugin/hosts/hosts.go @@ -31,7 +31,7 @@ func (h Hosts) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) ( if zone == "" { // PTR zones don't need to be specified in Origins if state.Type() != "PTR" { - // If this doesn't match we need to fall through regardless of h.Fallthrough + // if this doesn't match we need to fall through regardless of h.Fallthrough return plugin.NextOrFailure(h.Name(), h.Next, ctx, w, r) } } @@ -89,7 +89,6 @@ func (h Hosts) otherRecordsExist(qtype uint16, qname string) bool { } } return false - } // Name implements the plugin.Handle interface. @@ -97,39 +96,36 @@ func (h Hosts) Name() string { return "hosts" } // a takes a slice of net.IPs and returns a slice of A RRs. func a(zone string, ttl uint32, ips []net.IP) []dns.RR { - answers := []dns.RR{} - for _, ip := range ips { + answers := make([]dns.RR, len(ips)) + for i, ip := range ips { r := new(dns.A) - r.Hdr = dns.RR_Header{Name: zone, Rrtype: dns.TypeA, - Class: dns.ClassINET, Ttl: ttl} + r.Hdr = dns.RR_Header{Name: zone, Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: ttl} r.A = ip - answers = append(answers, r) + answers[i] = r } return answers } // aaaa takes a slice of net.IPs and returns a slice of AAAA RRs. func aaaa(zone string, ttl uint32, ips []net.IP) []dns.RR { - answers := []dns.RR{} - for _, ip := range ips { + answers := make([]dns.RR, len(ips)) + for i, ip := range ips { r := new(dns.AAAA) - r.Hdr = dns.RR_Header{Name: zone, Rrtype: dns.TypeAAAA, - Class: dns.ClassINET, Ttl: ttl} + r.Hdr = dns.RR_Header{Name: zone, Rrtype: dns.TypeAAAA, Class: dns.ClassINET, Ttl: ttl} r.AAAA = ip - answers = append(answers, r) + answers[i] = r } return answers } // ptr takes a slice of host names and filters out the ones that aren't in Origins, if specified, and returns a slice of PTR RRs. 
func (h *Hosts) ptr(zone string, ttl uint32, names []string) []dns.RR { - answers := []dns.RR{} - for _, n := range names { + answers := make([]dns.RR, len(names)) + for i, n := range names { r := new(dns.PTR) - r.Hdr = dns.RR_Header{Name: zone, Rrtype: dns.TypePTR, - Class: dns.ClassINET, Ttl: ttl} + r.Hdr = dns.RR_Header{Name: zone, Rrtype: dns.TypePTR, Class: dns.ClassINET, Ttl: ttl} r.Ptr = dns.Fqdn(n) - answers = append(answers, r) + answers[i] = r } return answers } diff --git a/plugin/hosts/hosts_test.go b/plugin/hosts/hosts_test.go index 975710bb31b..062c998513c 100644 --- a/plugin/hosts/hosts_test.go +++ b/plugin/hosts/hosts_test.go @@ -2,7 +2,6 @@ package hosts import ( "context" - "io" "strings" "testing" @@ -12,20 +11,17 @@ import ( "github.com/miekg/dns" ) -func (h *Hostsfile) parseReader(r io.Reader) { - h.hmap = h.parse(r) -} - func TestLookupA(t *testing.T) { h := Hosts{ Next: test.ErrorHandler(), Hostsfile: &Hostsfile{ Origins: []string{"."}, - hmap: newHostsMap(), + hmap: newMap(), + inline: newMap(), options: newOptions(), }, } - h.parseReader(strings.NewReader(hostsExample)) + h.hmap = h.parse(strings.NewReader(hostsExample)) ctx := context.TODO() diff --git a/plugin/hosts/hostsfile.go b/plugin/hosts/hostsfile.go index 421f8a77cd7..f7cc73528ba 100644 --- a/plugin/hosts/hostsfile.go +++ b/plugin/hosts/hostsfile.go @@ -19,7 +19,8 @@ import ( "github.com/coredns/coredns/plugin" ) -func parseLiteralIP(addr string) net.IP { +// parseIP calls discards any v6 zone info, before calling net.ParseIP. 
+func parseIP(addr string) net.IP { if i := strings.Index(addr, "%"); i >= 0 { // discard ipv6 zone addr = addr[0:i] @@ -28,10 +29,6 @@ func parseLiteralIP(addr string) net.IP { return net.ParseIP(addr) } -func absDomainName(b string) string { - return plugin.Name(b).Normalize() -} - type options struct { // automatically generate IP to Hostname PTR entries // for host entries we parse @@ -48,48 +45,40 @@ func newOptions() *options { return &options{ autoReverse: true, ttl: 3600, - reload: durationOf5s, + reload: time.Duration(5 * time.Second), } } -type hostsMap struct { - // Key for the list of literal IP addresses must be a host - // name. It would be part of DNS labels, a FQDN or an absolute - // FQDN. - // For now the key is converted to lower case for convenience. - byNameV4 map[string][]net.IP - byNameV6 map[string][]net.IP +// Map contains the IPv4/IPv6 and reverse mapping. +type Map struct { + // Key for the list of literal IP addresses must be a FQDN lowercased host name. + name4 map[string][]net.IP + name6 map[string][]net.IP // Key for the list of host names must be a literal IP address - // including IPv6 address with zone identifier. + // including IPv6 address without zone identifier. // We don't support old-classful IP address notation. - byAddr map[string][]string + addr map[string][]string } -const ( - durationOf0s = time.Duration(0) - durationOf5s = time.Duration(5 * time.Second) -) - -func newHostsMap() *hostsMap { - return &hostsMap{ - byNameV4: make(map[string][]net.IP), - byNameV6: make(map[string][]net.IP), - byAddr: make(map[string][]string), +func newMap() *Map { + return &Map{ + name4: make(map[string][]net.IP), + name6: make(map[string][]net.IP), + addr: make(map[string][]string), } } -// Len returns the total number of addresses in the hostmap, this includes -// V4/V6 and any reverse addresses. -func (h *hostsMap) Len() int { +// Len returns the total number of addresses in the hostmap, this includes V4/V6 and any reverse addresses. 
+func (h *Map) Len() int { l := 0 - for _, v4 := range h.byNameV4 { + for _, v4 := range h.name4 { l += len(v4) } - for _, v6 := range h.byNameV6 { + for _, v6 := range h.name6 { l += len(v6) } - for _, a := range h.byAddr { + for _, a := range h.addr { l += len(a) } return l @@ -103,11 +92,10 @@ type Hostsfile struct { Origins []string // hosts maps for lookups - hmap *hostsMap + hmap *Map // inline saves the hosts file that is inlined in a Corefile. - // We need a copy here as we want to use it to initialize the maps for parse. - inline *hostsMap + inline *Map // path to the hosts file path string @@ -132,6 +120,7 @@ func (h *Hostsfile) readHosts() { h.RLock() size := h.size h.RUnlock() + if err == nil && h.mtime.Equal(stat.ModTime()) && size == stat.Size() { return } @@ -155,12 +144,11 @@ func (h *Hostsfile) initInline(inline []string) { } h.inline = h.parse(strings.NewReader(strings.Join(inline, "\n"))) - *h.hmap = *h.inline } -// Parse reads the hostsfile and populates the byName and byAddr maps. -func (h *Hostsfile) parse(r io.Reader) *hostsMap { - hmap := newHostsMap() +// Parse reads the hostsfile and populates the byName and addr maps. 
+func (h *Hostsfile) parse(r io.Reader) *Map { + hmap := newMap() scanner := bufio.NewScanner(r) for scanner.Scan() { @@ -173,73 +161,52 @@ func (h *Hostsfile) parse(r io.Reader) *hostsMap { if len(f) < 2 { continue } - addr := parseLiteralIP(string(f[0])) + addr := parseIP(string(f[0])) if addr == nil { continue } - ver := ipVersion(string(f[0])) + + family := 0 + if addr.To4() != nil { + family = 1 + } else { + family = 2 + } + for i := 1; i < len(f); i++ { - name := absDomainName(string(f[i])) + name := plugin.Name(string(f[i])).Normalize() if plugin.Zones(h.Origins).Matches(name) == "" { // name is not in Origins continue } - switch ver { - case 4: - hmap.byNameV4[name] = append(hmap.byNameV4[name], addr) - case 6: - hmap.byNameV6[name] = append(hmap.byNameV6[name], addr) + switch family { + case 1: + hmap.name4[name] = append(hmap.name4[name], addr) + case 2: + hmap.name6[name] = append(hmap.name6[name], addr) default: continue } if !h.options.autoReverse { continue } - hmap.byAddr[addr.String()] = append(hmap.byAddr[addr.String()], name) + hmap.addr[addr.String()] = append(hmap.addr[addr.String()], name) } } - for name := range h.hmap.byNameV4 { - hmap.byNameV4[name] = append(hmap.byNameV4[name], h.hmap.byNameV4[name]...) - } - for name := range h.hmap.byNameV4 { - hmap.byNameV6[name] = append(hmap.byNameV6[name], h.hmap.byNameV6[name]...) - } - - for addr := range h.hmap.byAddr { - hmap.byAddr[addr] = append(hmap.byAddr[addr], h.hmap.byAddr[addr]...) - } - return hmap } -// ipVersion returns what IP version was used textually -// For why the string is parsed end to start, -// see IPv4-Compatible IPv6 addresses - RFC 4291 section 2.5.5 -func ipVersion(s string) int { - for i := len(s) - 1; i >= 0; i-- { - switch s[i] { - case '.': - return 4 - case ':': - return 6 - } - } - return 0 -} - -// LookupStaticHost looks up the IP addresses for the given host from the hosts file. 
-func (h *Hostsfile) lookupStaticHost(hmapByName map[string][]net.IP, host string) []net.IP { - fqhost := absDomainName(host) - +// lookupStaticHost looks up the IP addresses for the given host from the hosts file. +func (h *Hostsfile) lookupStaticHost(m map[string][]net.IP, host string) []net.IP { h.RLock() defer h.RUnlock() - if len(hmapByName) == 0 { + if len(m) == 0 { return nil } - ips, ok := hmapByName[fqhost] + ips, ok := m[host] if !ok { return nil } @@ -250,30 +217,38 @@ func (h *Hostsfile) lookupStaticHost(hmapByName map[string][]net.IP, host string // LookupStaticHostV4 looks up the IPv4 addresses for the given host from the hosts file. func (h *Hostsfile) LookupStaticHostV4(host string) []net.IP { - return h.lookupStaticHost(h.hmap.byNameV4, host) + host = strings.ToLower(host) + ip1 := h.lookupStaticHost(h.hmap.name4, host) + ip2 := h.lookupStaticHost(h.inline.name4, host) + return append(ip1, ip2...) } // LookupStaticHostV6 looks up the IPv6 addresses for the given host from the hosts file. func (h *Hostsfile) LookupStaticHostV6(host string) []net.IP { - return h.lookupStaticHost(h.hmap.byNameV6, host) + host = strings.ToLower(host) + ip1 := h.lookupStaticHost(h.hmap.name6, host) + ip2 := h.lookupStaticHost(h.inline.name6, host) + return append(ip1, ip2...) } // LookupStaticAddr looks up the hosts for the given address from the hosts file. 
func (h *Hostsfile) LookupStaticAddr(addr string) []string { - h.RLock() - defer h.RUnlock() - addr = parseLiteralIP(addr).String() + addr = parseIP(addr).String() if addr == "" { return nil } - if len(h.hmap.byAddr) == 0 { - return nil - } - hosts, ok := h.hmap.byAddr[addr] - if !ok { + + h.RLock() + defer h.RUnlock() + hosts1, _ := h.hmap.addr[addr] + hosts2, _ := h.inline.addr[addr] + + if len(hosts1) == 0 && len(hosts2) == 0 { return nil } - hostsCp := make([]string, len(hosts)) - copy(hostsCp, hosts) + + hostsCp := make([]string, len(hosts1)+len(hosts2)) + copy(hostsCp, hosts1) + copy(hostsCp[len(hosts1):], hosts2) return hostsCp } diff --git a/plugin/hosts/hostsfile_test.go b/plugin/hosts/hostsfile_test.go index db0e63d75d8..626b8918d46 100644 --- a/plugin/hosts/hostsfile_test.go +++ b/plugin/hosts/hostsfile_test.go @@ -9,15 +9,18 @@ import ( "reflect" "strings" "testing" + + "github.com/coredns/coredns/plugin" ) func testHostsfile(file string) *Hostsfile { h := &Hostsfile{ Origins: []string{"."}, - hmap: newHostsMap(), + hmap: newMap(), + inline: newMap(), options: newOptions(), } - h.parseReader(strings.NewReader(file)) + h.hmap = h.parse(strings.NewReader(file)) return h } @@ -74,44 +77,43 @@ var lookupStaticHostTests = []struct { { hosts, []staticHostEntry{ - {"odin", []string{"127.0.0.2", "127.0.0.3"}, []string{"::2"}}, - {"thor", []string{"127.1.1.1"}, []string{}}, - {"ullr", []string{"127.1.1.2"}, []string{}}, - {"ullrhost", []string{"127.1.1.2"}, []string{}}, - {"localhost", []string{}, []string{"fe80::1"}}, + {"odin.", []string{"127.0.0.2", "127.0.0.3"}, []string{"::2"}}, + {"thor.", []string{"127.1.1.1"}, []string{}}, + {"ullr.", []string{"127.1.1.2"}, []string{}}, + {"ullrhost.", []string{"127.1.1.2"}, []string{}}, + {"localhost.", []string{}, []string{"fe80::1"}}, }, }, { singlelinehosts, // see golang.org/issue/6646 []staticHostEntry{ - {"odin", []string{"127.0.0.2"}, []string{}}, + {"odin.", []string{"127.0.0.2"}, []string{}}, }, }, { 
ipv4hosts, []staticHostEntry{ - {"localhost", []string{"127.0.0.1", "127.0.0.2", "127.0.0.3"}, []string{}}, - {"localhost.localdomain", []string{"127.0.0.3"}, []string{}}, + {"localhost.", []string{"127.0.0.1", "127.0.0.2", "127.0.0.3"}, []string{}}, + {"localhost.localdomain.", []string{"127.0.0.3"}, []string{}}, }, }, { ipv6hosts, []staticHostEntry{ - {"localhost", []string{}, []string{"::1", "fe80::1", "fe80::2", "fe80::3"}}, - {"localhost.localdomain", []string{}, []string{"fe80::3"}}, + {"localhost.", []string{}, []string{"::1", "fe80::1", "fe80::2", "fe80::3"}}, + {"localhost.localdomain.", []string{}, []string{"fe80::3"}}, }, }, { casehosts, []staticHostEntry{ - {"PreserveMe", []string{"127.0.0.1"}, []string{"::1"}}, - {"PreserveMe.local", []string{"127.0.0.1"}, []string{"::1"}}, + {"PreserveMe.", []string{"127.0.0.1"}, []string{"::1"}}, + {"PreserveMe.local.", []string{"127.0.0.1"}, []string{"::1"}}, }, }, } func TestLookupStaticHost(t *testing.T) { - for _, tt := range lookupStaticHostTests { h := testHostsfile(tt.file) for _, ent := range tt.ents { @@ -121,7 +123,7 @@ func TestLookupStaticHost(t *testing.T) { } func testStaticHost(t *testing.T, ent staticHostEntry, h *Hostsfile) { - ins := []string{ent.in, absDomainName(ent.in), strings.ToLower(ent.in), strings.ToUpper(ent.in)} + ins := []string{ent.in, plugin.Name(ent.in).Normalize(), strings.ToLower(ent.in), strings.ToUpper(ent.in)} for k, in := range ins { addrsV4 := h.LookupStaticHostV4(in) if len(addrsV4) != len(ent.v4) { @@ -156,43 +158,43 @@ var lookupStaticAddrTests = []struct { { hosts, []staticIPEntry{ - {"255.255.255.255", []string{"broadcasthost"}}, - {"127.0.0.2", []string{"odin"}}, - {"127.0.0.3", []string{"odin"}}, - {"::2", []string{"odin"}}, - {"127.1.1.1", []string{"thor"}}, - {"127.1.1.2", []string{"ullr", "ullrhost"}}, - {"fe80::1", []string{"localhost"}}, + {"255.255.255.255", []string{"broadcasthost."}}, + {"127.0.0.2", []string{"odin."}}, + {"127.0.0.3", []string{"odin."}}, + 
{"::2", []string{"odin."}}, + {"127.1.1.1", []string{"thor."}}, + {"127.1.1.2", []string{"ullr.", "ullrhost."}}, + {"fe80::1", []string{"localhost."}}, }, }, { singlelinehosts, // see golang.org/issue/6646 []staticIPEntry{ - {"127.0.0.2", []string{"odin"}}, + {"127.0.0.2", []string{"odin."}}, }, }, { ipv4hosts, // see golang.org/issue/8996 []staticIPEntry{ - {"127.0.0.1", []string{"localhost"}}, - {"127.0.0.2", []string{"localhost"}}, - {"127.0.0.3", []string{"localhost", "localhost.localdomain"}}, + {"127.0.0.1", []string{"localhost."}}, + {"127.0.0.2", []string{"localhost."}}, + {"127.0.0.3", []string{"localhost.", "localhost.localdomain."}}, }, }, { ipv6hosts, // see golang.org/issue/8996 []staticIPEntry{ - {"::1", []string{"localhost"}}, - {"fe80::1", []string{"localhost"}}, - {"fe80::2", []string{"localhost"}}, - {"fe80::3", []string{"localhost", "localhost.localdomain"}}, + {"::1", []string{"localhost."}}, + {"fe80::1", []string{"localhost."}}, + {"fe80::2", []string{"localhost."}}, + {"fe80::3", []string{"localhost.", "localhost.localdomain."}}, }, }, { casehosts, // see golang.org/issue/12806 []staticIPEntry{ - {"127.0.0.1", []string{"PreserveMe", "PreserveMe.local"}}, - {"::1", []string{"PreserveMe", "PreserveMe.local"}}, + {"127.0.0.1", []string{"PreserveMe.", "PreserveMe.local."}}, + {"::1", []string{"PreserveMe.", "PreserveMe.local."}}, }, }, } @@ -209,7 +211,7 @@ func TestLookupStaticAddr(t *testing.T) { func testStaticAddr(t *testing.T, ent staticIPEntry, h *Hostsfile) { hosts := h.LookupStaticAddr(ent.in) for i := range ent.out { - ent.out[i] = absDomainName(ent.out[i]) + ent.out[i] = plugin.Name(ent.out[i]).Normalize() } if !reflect.DeepEqual(hosts, ent.out) { t.Errorf("%s, lookupStaticAddr(%s) = %v; want %v", h.path, ent.in, hosts, h) @@ -221,7 +223,7 @@ func TestHostCacheModification(t *testing.T) { // See https://github.com/golang/go/issues/14212. 
h := testHostsfile(ipv4hosts) - ent := staticHostEntry{"localhost", []string{"127.0.0.1", "127.0.0.2", "127.0.0.3"}, []string{}} + ent := staticHostEntry{"localhost.", []string{"127.0.0.1", "127.0.0.2", "127.0.0.3"}, []string{}} testStaticHost(t, ent, h) // Modify the addresses return by lookupStaticHost. addrs := h.LookupStaticHostV6(ent.in) @@ -231,7 +233,7 @@ func TestHostCacheModification(t *testing.T) { testStaticHost(t, ent, h) h = testHostsfile(ipv6hosts) - entip := staticIPEntry{"::1", []string{"localhost"}} + entip := staticIPEntry{"::1", []string{"localhost."}} testStaticAddr(t, entip, h) // Modify the hosts return by lookupStaticAddr. hosts := h.LookupStaticAddr(entip.in) diff --git a/plugin/hosts/setup.go b/plugin/hosts/setup.go index 26c3c82d3df..73fd3cec4ae 100644 --- a/plugin/hosts/setup.go +++ b/plugin/hosts/setup.go @@ -26,7 +26,7 @@ func init() { func periodicHostsUpdate(h *Hosts) chan bool { parseChan := make(chan bool) - if h.options.reload == durationOf0s { + if h.options.reload == 0 { return parseChan } @@ -78,7 +78,7 @@ func hostsParse(c *caddy.Controller) (Hosts, error) { h := Hosts{ Hostsfile: &Hostsfile{ path: "/etc/hosts", - hmap: newHostsMap(), + hmap: newMap(), options: options, }, } @@ -152,7 +152,7 @@ func hostsParse(c *caddy.Controller) (Hosts, error) { if err != nil { return h, c.Errf("invalid duration for reload '%s'", remaining[0]) } - if reload < durationOf0s { + if reload < 0 { return h, c.Errf("invalid negative duration for reload '%s'", remaining[0]) } options.reload = reload diff --git a/plugin/hosts/setup_test.go b/plugin/hosts/setup_test.go index 0e3800112ae..fd8a8060e70 100644 --- a/plugin/hosts/setup_test.go +++ b/plugin/hosts/setup_test.go @@ -100,7 +100,7 @@ func TestHostsInlineParse(t *testing.T) { tests := []struct { inputFileRules string shouldErr bool - expectedbyAddr map[string][]string + expectedaddr map[string][]string expectedFallthrough fall.F }{ { @@ -148,19 +148,20 @@ func TestHostsInlineParse(t *testing.T) { 
t.Fatalf("Test %d expected no errors, but got '%v'", i, err) } else if !test.shouldErr { if !h.Fall.Equal(test.expectedFallthrough) { - t.Fatalf("Test %d expected fallthrough of %v, got %v", i, test.expectedFallthrough, h.Fall) + t.Errorf("Test %d expected fallthrough of %v, got %v", i, test.expectedFallthrough, h.Fall) } - for k, expectedVal := range test.expectedbyAddr { - if val, ok := h.hmap.byAddr[k]; !ok { - t.Fatalf("Test %d expected %v, got no entry", i, k) - } else { - if len(expectedVal) != len(val) { - t.Fatalf("Test %d expected %v records for %v, got %v", i, len(expectedVal), k, len(val)) - } - for j := range expectedVal { - if expectedVal[j] != val[j] { - t.Fatalf("Test %d expected %v for %v, got %v", i, expectedVal[j], j, val[j]) - } + for k, expectedVal := range test.expectedaddr { + val, ok := h.inline.addr[k] + if !ok { + t.Errorf("Test %d expected %v, got no entry", i, k) + continue + } + if len(expectedVal) != len(val) { + t.Errorf("Test %d expected %v records for %v, got %v", i, len(expectedVal), k, len(val)) + } + for j := range expectedVal { + if expectedVal[j] != val[j] { + t.Errorf("Test %d expected %v for %v, got %v", i, expectedVal[j], j, val[j]) } } } From ee6c5f87d1ef5a00cf4383946ef6b98fe3d9a0db Mon Sep 17 00:00:00 2001 From: Yong Tang Date: Thu, 25 Jul 2019 13:20:23 -0700 Subject: [PATCH 035/324] Update CONTRIBUTING.md (#3046) Signed-off-by: Yong Tang --- CONTRIBUTING.md | 6 +++++- README.md | 4 ++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index bb38013479c..18bd4a62b5d 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -29,11 +29,15 @@ tests to assert your change is working properly and is thoroughly covered. First, please [search](https://github.com/coredns/coredns/search?q=&type=Issues&utf8=%E2%9C%93) with a variety of keywords to ensure your suggestion/proposal is new. -If so, you may open either an issue or a pull request for discussion and feedback. 
+Please also check for existing pull requests to see if someone is already working on this. We want to avoid duplication of effort. + +If the proposal is new and no one has opened pull request yet, you may open either an issue or a pull request for discussion and feedback. If you are going to spend significant time implementing code for a pull request, best to open an issue first and "claim" it and get feedback before you invest a lot of time. +**If someone already opened a pull request, but you think the pull request has stalled and you would like to open another pull request for the same or similar feature, get some of the maintainers (see [OWNERS](OWNERS)) involved to resolve the situation and move things forward.** + If possible make a pull request as small as possible, or submit multiple pull request to complete a feature. Smaller means: easier to understand and review. This in turn means things can be merged faster. diff --git a/README.md b/README.md index d881401c475..78c02ed5558 100644 --- a/README.md +++ b/README.md @@ -184,6 +184,10 @@ More resources can be found: - Twitter: [@corednsio](https://twitter.com/corednsio) - Mailing list/group: (not very active) +## Contribution guidelines + +If you want to contribute to CoreDNS, be sure to review the [contribution guidelines](CONTRIBUTING.md). 
+ ## Deployment Examples for deployment via systemd and other use cases can be found in the [deployment From 4f268ad801e5a8630d628659a2d265fced3906df Mon Sep 17 00:00:00 2001 From: Chris O'Haver Date: Thu, 25 Jul 2019 16:24:12 -0400 Subject: [PATCH 036/324] update to client-go 12.0.0 (#3047) --- go.mod | 8 +++----- go.sum | 52 ++++++++++++++++++++++++++++++++++------------------ 2 files changed, 37 insertions(+), 23 deletions(-) diff --git a/go.mod b/go.mod index 76118fe77a8..203777f02cd 100644 --- a/go.mod +++ b/go.mod @@ -20,7 +20,6 @@ require ( github.com/gogo/protobuf v1.2.1 // indirect github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef // indirect github.com/golang/protobuf v1.3.2 - github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf // indirect github.com/googleapis/gnostic v0.2.0 // indirect github.com/gophercloud/gophercloud v0.0.0-20190307220656-fe1ba5ce12dd // indirect github.com/grpc-ecosystem/go-grpc-middleware v1.0.0 // indirect @@ -59,13 +58,12 @@ require ( google.golang.org/grpc v1.22.0 gopkg.in/DataDog/dd-trace-go.v1 v1.16.0 gopkg.in/inf.v0 v0.9.1 // indirect - k8s.io/api v0.0.0-20190313235455-40a48860b5ab - k8s.io/apimachinery v0.0.0-20190313205120-d7deff9243b1 - k8s.io/client-go v11.0.0+incompatible + k8s.io/api v0.0.0-20190620084959-7cf5895f2711 + k8s.io/apimachinery v0.0.0-20190612205821-1799e75a0719 + k8s.io/client-go v0.0.0-20190620085101-78d2af792bab k8s.io/klog v0.3.3 k8s.io/kube-openapi v0.0.0-20190306001800-15615b16d372 // indirect k8s.io/utils v0.0.0-20190529001817-6999998975a7 // indirect - sigs.k8s.io/yaml v1.1.0 // indirect ) replace github.com/miekg/dns v1.1.3 => github.com/miekg/dns v1.1.15 diff --git a/go.sum b/go.sum index 203cbce4b2c..5d054cb1c3d 100644 --- a/go.sum +++ b/go.sum @@ -3,6 +3,7 @@ cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT cloud.google.com/go v0.38.0/go.mod 
h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.41.0 h1:NFvqUTDnSNYPX5oReekmB+D+90jrJIcVImxQ3qrBVgM= cloud.google.com/go v0.41.0/go.mod h1:OauMR7DV8fzvZIl2qg6rkaIhD/vmgk4iwEw/h6ercmg= +github.com/Azure/go-autorest v11.1.2+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DataDog/zstd v1.3.5 h1:DtpNbljikUepEPD16hD4LvIcmhnhdLTiW/5pHgbmp14= @@ -15,16 +16,6 @@ github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuy github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/apache/thrift v0.12.0 h1:pODnxUFNcjP9UTLZGTdeh+j16A8lJbRvD3rOtrk/7bs= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= -github.com/aws/aws-sdk-go v1.20.15 h1:y9ts8MJhB7ReUidS6Rq+0KxdFeL01J+pmOlGq6YqpiQ= -github.com/aws/aws-sdk-go v1.20.15/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.20.17 h1:ZQ0cM9FpuufVDHlNViD8vD68IkEQL/VUOMwJPBqkaaM= -github.com/aws/aws-sdk-go v1.20.17/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.20.20 h1:OAR/GtjMOhenkp1NNKr1N1FgIP3mQXHeGbRhvVIAQp0= -github.com/aws/aws-sdk-go v1.20.20/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.21.0 h1:dRGzi4XZe5GFSJssHkRNUf/hRD/HL8bdqTYa9hpAO8c= -github.com/aws/aws-sdk-go v1.21.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.21.1 h1:IOFDnCEDybcw4V8nbKqyyjBu+vpu7hFYSfZqNuogi7I= -github.com/aws/aws-sdk-go v1.21.1/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= 
github.com/aws/aws-sdk-go v1.21.2 h1:CqbWrQzi7s8J2F0TRRdLvTr0+bt5Zxo2IDoFNGsAiUg= github.com/aws/aws-sdk-go v1.21.2/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= @@ -50,10 +41,12 @@ github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfc github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgrijalva/jwt-go v0.0.0-20160705203006-01aeca54ebda/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dnstap/golang-dnstap v0.0.0-20170829151710-2cf77a2b5e11 h1:m8nX8hsUghn853BJ5qB0lX+VvS6LTJPksWyILFZRYN4= github.com/dnstap/golang-dnstap v0.0.0-20170829151710-2cf77a2b5e11/go.mod h1:s1PfVYYVmTMgCSPtho4LKBDecEHJWtiVDPNv78Z985U= +github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/eapache/go-resiliency v1.1.0 h1:1NtRmCAqadE2FN4ZcN6g90TP3uk8cg9rn9eNK2197aU= github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= @@ -61,6 +54,8 @@ github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8 github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= github.com/eapache/queue 
v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/evanphx/json-patch v0.0.0-20190203023257-5858425f7550/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.1.0+incompatible h1:K1MDoo4AZ4wU0GIU/fPmtZg7VpzLjCxu+UwBD1FvwOc= github.com/evanphx/json-patch v4.1.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/farsightsec/golang-framestream v0.0.0-20181102145529-8a0cb8ba8710 h1:QdyRyGZWLEvJG5Kw3VcVJvhXJ5tZ1MkRgqpJOEZSySM= @@ -78,11 +73,13 @@ github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9 github.com/go-logfmt/logfmt v0.4.0 h1:MP4Eh7ZCb31lleYCFuwm0oe4/YGak+5l1vA2NOE80nA= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gogo/protobuf v0.0.0-20171007142547-342cbe0a0415/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef 
h1:veQD95Isof8w9/WXiA+pa3tz3fJXkt5B7QaRBrM62gk= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= @@ -95,26 +92,32 @@ github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db h1:woRePGFeVFfLKN/pOkfl+p/TAqKOfFu+7KPlMVpok/w= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20160524151835-7d79101e329e/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf h1:+RRA9JqSOZFfKrOeqr2z77+8R2RKyh8PG66dcu1V0ck= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod 
h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.2.0 h1:l6N3VoaVzTncYYW+9yOz2LJJammFZGBO13sqgEhpy9g= github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/gophercloud/gophercloud v0.0.0-20190126172459-c818fa66e4c8/go.mod h1:3WdhXV3rUYy9p6AUW8d94kr+HS62Y4VL9mBnFxsD8q4= github.com/gophercloud/gophercloud v0.0.0-20190307220656-fe1ba5ce12dd h1:tkA3C/XTk8iACLOlTez37pL+0iGSYkkRGKdXgJ6ZylM= github.com/gophercloud/gophercloud v0.0.0-20190307220656-fe1ba5ce12dd/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gregjones/httpcache v0.0.0-20170728041850-787624de3eb7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0 h1:Iju5GlWwrvL6UBg4zJJt3btmonfrMlCDdsejg4CZE7c= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= @@ -130,6 +133,7 @@ github.com/hashicorp/golang-lru v0.5.1 
h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+ github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.7 h1:Y+UAYTZ7gDEuOfhxKWy+dvb5dRQ6rJjFSdX2HZY1/gI= github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/jimstudt/http-authentication v0.0.0-20140401203705-3eca13d6893a/go.mod h1:wK6yTYYcgjHE1Z1QtXACPDjcFJyBskHEdagmnq3vsP8= @@ -137,6 +141,7 @@ github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5i github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/json-iterator/go v0.0.0-20180701071628-ab8a2e0c74be/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= @@ -172,11 +177,13 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJ github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/mwitkow/go-conntrack 
v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0= github.com/naoina/toml v0.1.1/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.8.0 h1:VkHVNpR4iVnU8XQR6DBm8BqYjN7CRzw+xKUbVVbbW9w= github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v0.0.0-20190113212917-5533ce8a0da3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492 h1:lM6RxxfUMrYL/f8bWEUqdXrANWtrL7Nndbm9iFN0DlU= @@ -187,6 +194,7 @@ github.com/openzipkin-contrib/zipkin-go-opentracing v0.3.5 h1:82Tnq9OJpn+h5xgGps github.com/openzipkin-contrib/zipkin-go-opentracing v0.3.5/go.mod h1:uVHyebswE1cCXr2A73cRM2frx5ld1RJUCJkFNZ90ZiI= github.com/openzipkin/zipkin-go-opentracing v0.3.5 h1:nZPvd2EmRKP+NzFdSuxZF/FG4Y4W2gn6ugXliTAu9o0= github.com/openzipkin/zipkin-go-opentracing v0.3.5/go.mod h1:js2AbwmHW0YD9DwIw2JhQWmbfFi/UnWyYwdVhqbCDOE= +github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/philhofer/fwd v1.0.0 h1:UbZqGr5Y38ApvM/V/jEljVxwocdweyH+vmYvRPBnbqQ= github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= github.com/pierrec/lz4 v2.0.5+incompatible h1:2xWsjqPFWcplujydGg4WmhC/6fZqK42wMM8aXeqhl0I= @@ -219,6 +227,7 @@ 
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6Mwd github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -243,6 +252,7 @@ go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/ go.uber.org/zap v1.9.1 h1:XCJQEf3W6eZaVwhRBof6ImoYGJSITeKWsyeh3HFu/5o= go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181025213731-e84da0312774/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190123085648-057139ce5d2b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190228161510-8dd112bcdc25/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -266,6 +276,7 @@ golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net 
v0.0.0-20190206173232-65e2d4e15006/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190328230028-74de082e2cca/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -280,6 +291,7 @@ golang.org/x/net v0.0.0-20190628185345-da137c7871d7 h1:rTIdg5QFRR7XCaK4LCjBiPbx8 golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -308,8 +320,10 @@ golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb h1:fgwFCsaw9buMuxNd6+DQfAuSF golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/time v0.0.0-20161028155119-f51c12702a4d/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -348,8 +362,6 @@ google.golang.org/grpc v1.21.1 h1:j6XxA85m/6txkUCHvzlV5f+HBNl/1r5cZ2A/3IEFOO8= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.22.0 h1:J0UbZOIrCAl+fpTOf8YLs4dJo8L/owV4LYVtAXQoPkw= google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -gopkg.in/DataDog/dd-trace-go.v1 v1.15.0 h1:2LhklnAJsRSelbnBrrE5QuRleRDkmOh2JWxOtIX6yec= -gopkg.in/DataDog/dd-trace-go.v1 v1.15.0/go.mod h1:DVp8HmDh8PuTu2Z0fVVlBsyWaC++fzwVCaGWylTe3tg= gopkg.in/DataDog/dd-trace-go.v1 v1.16.0 h1:M+pIoj6uFByW3E8KaAaBmsLl3Sma3JzXZ9eo7ffyX5A= gopkg.in/DataDog/dd-trace-go.v1 v1.16.0/go.mod h1:DVp8HmDh8PuTu2Z0fVVlBsyWaC++fzwVCaGWylTe3tg= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= @@ -358,6 +370,7 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/mcuadros/go-syslog.v2 v2.2.1/go.mod h1:l5LPIyOOyIdQquNg+oU6Z3524YwrcqEm0aKH+5zpt2U= @@ -375,17 +388,20 @@ honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools 
v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -k8s.io/api v0.0.0-20190313235455-40a48860b5ab h1:DG9A67baNpoeweOy2spF1OWHhnVY5KR7/Ek/+U1lVZc= -k8s.io/api v0.0.0-20190313235455-40a48860b5ab/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA= -k8s.io/apimachinery v0.0.0-20190313205120-d7deff9243b1 h1:IS7K02iBkQXpCeieSiyJjGoLSdVOv2DbPaWHJ+ZtgKg= -k8s.io/apimachinery v0.0.0-20190313205120-d7deff9243b1/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0= -k8s.io/client-go v11.0.0+incompatible h1:LBbX2+lOwY9flffWlJM7f1Ct8V2SRNiMRDFeiwnJo9o= -k8s.io/client-go v11.0.0+incompatible/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s= +k8s.io/api v0.0.0-20190620084959-7cf5895f2711 h1:BblVYz/wE5WtBsD/Gvu54KyBUTJMflolzc5I2DTvh50= +k8s.io/api v0.0.0-20190620084959-7cf5895f2711/go.mod h1:TBhBqb1AWbBQbW3XRusr7n7E4v2+5ZY8r8sAMnyFC5A= +k8s.io/apimachinery v0.0.0-20190612205821-1799e75a0719 h1:uV4S5IB5g4Nvi+TBVNf3e9L4wrirlwYJ6w88jUQxTUw= +k8s.io/apimachinery v0.0.0-20190612205821-1799e75a0719/go.mod h1:I4A+glKBHiTgiEjQiCCQfCAIcIMFGt291SmsvcrFzJA= +k8s.io/client-go v0.0.0-20190620085101-78d2af792bab h1:E8Fecph0qbNsAbijJJQryKu4Oi9QTp5cVpjTE+nqg6g= +k8s.io/client-go v0.0.0-20190620085101-78d2af792bab/go.mod h1:E95RaSlHr79aHaX0aGSwcPNfygDiPKOVXdmivCIZT0k= k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v0.3.1/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.3.3 h1:niceAagH1tzskmaie/icWd7ci1wbG7Bf2c6YGcQv+3c= k8s.io/klog v0.3.3/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= k8s.io/kube-openapi v0.0.0-20190306001800-15615b16d372 
h1:zia7dTzfEtdiSUxi9cXUDsSQH2xE6igmGKyFn2on/9A= k8s.io/kube-openapi v0.0.0-20190306001800-15615b16d372/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= +k8s.io/utils v0.0.0-20190221042446-c2654d5206da/go.mod h1:8k8uAuAQ0rXslZKaEWd0c3oVhZz7sSzSiPnVZayjIX0= k8s.io/utils v0.0.0-20190529001817-6999998975a7 h1:5UOdmwfY+7XsXvo26XeCDu9GhHJPkO1z8Mcz5AHMnOE= k8s.io/utils v0.0.0-20190529001817-6999998975a7/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= From e5b44f4bd3afcafca748b2b13d97dd8874b55a05 Mon Sep 17 00:00:00 2001 From: Yong Tang Date: Thu, 25 Jul 2019 14:55:26 -0700 Subject: [PATCH 037/324] Update CONTRIBUTING.md for dependency update methods (go dep -> go mod) (#3048) * Update CONTRIBUTING.md for dependency update methods (go dep -> go mod) Signed-off-by: Yong Tang * Small fix for reveiw comment Signed-off-by: Yong Tang --- CONTRIBUTING.md | 17 ++++++----------- 1 file changed, 6 insertions(+), 11 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 18bd4a62b5d..7c2adc70915 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -44,25 +44,20 @@ faster. ## Updating Dependencies -We use Golang's [`dep`](https://github.com/golang/dep) as the tool to manage vendor dependencies. -The tool could be obtained through: +We use [Go Modules](https://github.com/golang/go/wiki/Modules) as the tool to manage vendor dependencies. +Use the following to update the version of all dependencies ```sh -$ go get -u github.com/golang/dep/cmd/dep -``` - -Use the following to update the locked versions of all dependencies -```sh -$ make dep-ensure +$ go get -u ``` After the dependencies have been updated or added, you might run the following to -prune vendored packages: +cleanup the go module files: ```sh -$ dep prune +$ go mod tidy ``` -Please refer to Golang's [`dep`](https://github.com/golang/dep) for more details. 
+Please refer to [Go Modules](https://github.com/golang/go/wiki/Modules) for more details. # Thank You From cfc4948f92f44a09a15117f0120a8dcf97e61053 Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Fri, 26 Jul 2019 07:10:49 +0000 Subject: [PATCH 038/324] More note tweaks; couple of typos for 1.6.0 (#3049) Signed-off-by: Miek Gieben --- notes/coredns-1.6.0.md | 37 +++++++++++++++++++------------------ 1 file changed, 19 insertions(+), 18 deletions(-) diff --git a/notes/coredns-1.6.0.md b/notes/coredns-1.6.0.md index 28b1070028f..8ddf272bae9 100644 --- a/notes/coredns-1.6.0.md +++ b/notes/coredns-1.6.0.md @@ -15,15 +15,16 @@ The `-cpu` flag is removed from this version. This release sports changes in the *file* plugin. A speed up in the *log* plugin and fixes in the *cache* plugin. -Upcoming deprecation: the kubernetes *federation* plugin will be moved to -github.com/coredns/federation. This is likely to happen in CoreDNS 1.7.0. +Upcoming deprecation: the kubernetes *federation* plugin [will be moved +to](https://github.com/coredns/coredns/issues/3041) github.com/coredns/federation. This is likely to +happen in CoreDNS 1.7.0. # Plugins -* The [*file*](/plugins/file) a lot of bug fixes and it loads lazily on start, i.e. if the zonefile +* The [*file*](/plugins/file) got lot of bug fixes and it now loads zones lazily on start, i.e. if the zonefile does not exist, it keeps trying with every reload period. * The [*cache*](/plugins/cache) fixes a race. -* Multiple fixes in the [*route53*](/plugins/route53). +* Multiple fixes in the [*route53*](/plugins/route53) plugin. * And the [*kubernetes*](/plugins/kubernetes) removes the `resyncperiod` option. ## Brought to You By @@ -32,17 +33,17 @@ github.com/coredns/federation. This is likely to happen in CoreDNS 1.7.0. 
## Noteworthy Changes -plugin/file: Simplify locking (https://github.com/coredns/coredns/pull/3024) -plugin/file: New zone should have zero records (https://github.com/coredns/coredns/pull/3025) -plugin/file: Rename do to walk and cleanup and document (https://github.com/coredns/coredns/pull/2987) -plugin/file: Fix setting ReloadInterval (https://github.com/coredns/coredns/pull/3017) -plugin/file: Make non-existent file non-fatal (https://github.com/coredns/coredns/pull/2955) -plugin/metrics: Fix response_rcode_count_total metric (https://github.com/coredns/coredns/pull/3029) -pkg/cache: Fix race in Add() and Evict() (https://github.com/coredns/coredns/pull/3013) -plugin/route53: Fix IAM credential file (https://github.com/coredns/coredns/pull/2983) -plugin/route53: Fix multiple credentials in route53 (https://github.com/coredns/coredns/pull/2859) -pkg/replacer: Evaluate format once and improve perf by ~3x (https://github.com/coredns/coredns/pull/3002) -plugin/log: Fix log plugin benchmark and slightly improve performance (https://github.com/coredns/coredns/pull/3004) -core: Scrub: TC bit is always set (https://github.com/coredns/coredns/pull/3001) -plugin/rewrite: Fix domain length validation (https://github.com/coredns/coredns/pull/2995) -plugin/kubernetes: Remove resyncperiod (https://github.com/coredns/coredns/pull/2923) +* core: Scrub: TC bit is always set (https://github.com/coredns/coredns/pull/3001) +* plugin/file: Simplify locking (https://github.com/coredns/coredns/pull/3024) +* plugin/file: New zone should have zero records (https://github.com/coredns/coredns/pull/3025) +* plugin/file: Rename do to walk and cleanup and document (https://github.com/coredns/coredns/pull/2987) +* plugin/file: Fix setting ReloadInterval (https://github.com/coredns/coredns/pull/3017) +* plugin/file: Make non-existent file 
non-fatal (https://github.com/coredns/coredns/pull/2955) +* plugin/metrics: Fix response_rcode_count_total metric (https://github.com/coredns/coredns/pull/3029) +* pkg/cache: Fix race in Add() and Evict() (https://github.com/coredns/coredns/pull/3013) +* plugin/route53: Fix IAM credential file (https://github.com/coredns/coredns/pull/2983) +* plugin/route53: Fix multiple credentials in route53 (https://github.com/coredns/coredns/pull/2859) +* pkg/replacer: Evaluate format once and improve perf by ~3x (https://github.com/coredns/coredns/pull/3002) +* plugin/log: Fix log plugin benchmark and slightly improve performance (https://github.com/coredns/coredns/pull/3004) +* plugin/rewrite: Fix domain length validation (https://github.com/coredns/coredns/pull/2995) +* plugin/kubernetes: Remove resyncperiod (https://github.com/coredns/coredns/pull/2923) From 7a3371d740065d4a0e321abe27fe090300a4ce4f Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Sat, 27 Jul 2019 11:47:55 +0000 Subject: [PATCH 039/324] plugin/file: allow README.md testing (#3052) * Fix corefile usage * plugin/file: allow README.md testing Allow readme testing for the file plugin and fix bugs that where found: * the reader wasn't reset when re-reading the same io.reader for a different origin. Signed-off-by: Miek Gieben * Update test/example_test.go Co-Authored-By: Michael Grosser --- plugin/file/README.md | 10 +++++----- plugin/file/file.go | 2 +- plugin/file/setup.go | 1 + test/example_test.go | 21 +++++++++++---------- 4 files changed, 18 insertions(+), 16 deletions(-) diff --git a/plugin/file/README.md b/plugin/file/README.md index 5322a8ac62b..58cfe9586ff 100644 --- a/plugin/file/README.md +++ b/plugin/file/README.md @@ -44,7 +44,7 @@ file DBFILE [ZONES... 
] { Load the `example.org` zone from `example.org.signed` and allow transfers to the internet, but send notifies to 10.240.1.1 -~~~ txt +~~~ corefile example.org { file example.org.signed { transfer to * @@ -55,7 +55,7 @@ example.org { Or use a single zone file for multiple zones: -~~~ txt +~~~ corefile . { file example.org.signed example.org example.net { transfer to * @@ -67,7 +67,7 @@ Or use a single zone file for multiple zones: Note that if you have a configuration like the following you may run into a problem of the origin not being correctly recognized: -~~~ txt +~~~ corefile . { file db.example.org } @@ -78,7 +78,7 @@ which, in this case, is the root zone. Any contents of `db.example.org` will the origin set; this may or may not do what you want. It's better to be explicit here and specify the correct origin. This can be done in two ways: -~~~ txt +~~~ corefile . { file db.example.org example.org } @@ -86,7 +86,7 @@ It's better to be explicit here and specify the correct origin. 
This can be done Or -~~~ txt +~~~ corefile example.org { file db.example.org } diff --git a/plugin/file/file.go b/plugin/file/file.go index 169720e64af..c63fa43f535 100644 --- a/plugin/file/file.go +++ b/plugin/file/file.go @@ -145,7 +145,7 @@ func Parse(f io.Reader, origin, fileName string, serial int64) (*Zone, error) { } } if !seenSOA { - return nil, fmt.Errorf("file %q has no SOA record", fileName) + return nil, fmt.Errorf("file %q has no SOA record for origin %s", fileName, origin) } return z, nil diff --git a/plugin/file/setup.go b/plugin/file/setup.go index 162db6d657c..ce78e815e1c 100644 --- a/plugin/file/setup.go +++ b/plugin/file/setup.go @@ -87,6 +87,7 @@ func fileParse(c *caddy.Controller) (Zones, error) { origins[i] = plugin.Host(origins[i]).Normalize() z[origins[i]] = NewZone(origins[i], fileName) if openErr == nil { + reader.Seek(0, 0) zone, err := Parse(reader, origins[i], fileName, 0) if err == nil { z[origins[i]] = zone diff --git a/test/example_test.go b/test/example_test.go index 852143fe8a2..9d8dec8329e 100644 --- a/test/example_test.go +++ b/test/example_test.go @@ -2,14 +2,15 @@ package test const exampleOrg = `; example.org test file $TTL 3600 -example.org. IN SOA sns.dns.icann.org. noc.dns.icann.org. 2015082541 7200 3600 1209600 3600 -example.org. IN NS b.iana-servers.net. -example.org. IN NS a.iana-servers.net. -example.org. IN A 127.0.0.1 -example.org. IN A 127.0.0.2 -short.example.org. 1 IN A 127.0.0.3 -*.w.example.org. IN TXT "Wildcard" -a.b.c.w.example.org. IN TXT "Not a wildcard" -cname.example.org. IN CNAME www.example.net. -service.example.org. IN SRV 8080 10 10 example.org. +@ IN SOA sns.dns.icann.org. noc.dns.icann.org. 2015082541 7200 3600 1209600 3600 +@ IN NS b.iana-servers.net. +@ IN NS a.iana-servers.net. +@ IN A 127.0.0.1 +@ IN A 127.0.0.2 +short 1 IN A 127.0.0.3 + +*.w 3600 IN TXT "Wildcard" +a.b.c.w IN TXT "Not a wildcard" +cname IN CNAME www.example.net. 
+service IN SRV 8080 10 10 @ ` From 92a636df53c16047a7523d499811ac0df41edaf3 Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Sat, 27 Jul 2019 15:06:50 +0000 Subject: [PATCH 040/324] plugin/file: z.Expired needs be read under a rlock (#3056) Read lock before reading the Expired field of a zone. Fixes: #3053 Signed-off-by: Miek Gieben --- plugin/file/file.go | 5 ++++- plugin/file/zone.go | 2 +- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/plugin/file/file.go b/plugin/file/file.go index c63fa43f535..8e10499fa68 100644 --- a/plugin/file/file.go +++ b/plugin/file/file.go @@ -69,7 +69,10 @@ func (f File) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (i return dns.RcodeSuccess, nil } - if z.Expired != nil && *z.Expired { + z.RLock() + exp := z.Expired + z.RUnlock() + if exp != nil && *exp { log.Errorf("Zone %s is expired", zone) return dns.RcodeServerFailure, nil } diff --git a/plugin/file/zone.go b/plugin/file/zone.go index 1fcfc5d332b..d3adca29e25 100644 --- a/plugin/file/zone.go +++ b/plugin/file/zone.go @@ -22,13 +22,13 @@ type Zone struct { file string *tree.Tree Apex + Expired *bool sync.RWMutex TransferTo []string StartupOnce sync.Once TransferFrom []string - Expired *bool ReloadInterval time.Duration reloadShutdown chan bool From b209cb162c547ca7520a0dbe82b12c34673de2e2 Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Sun, 28 Jul 2019 10:02:38 +0000 Subject: [PATCH 041/324] contrib: add words on adding new plugins (#3057) * contrib: add words on adding new plugins New plugins are usually dropped in one giant ball of code, add some words on how this can be approached and made to work faster. 
(will at least give us something to point at) Signed-off-by: Miek Gieben * Update CONTRIBUTING.md Co-Authored-By: Michael Grosser * Update CONTRIBUTING.md Co-Authored-By: Michael Grosser * Update CONTRIBUTING.md Co-Authored-By: Michael Grosser * Update CONTRIBUTING.md * Update CONTRIBUTING.md Co-Authored-By: Michael Grosser --- CONTRIBUTING.md | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 7c2adc70915..5cfccf49e3a 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -42,6 +42,16 @@ If possible make a pull request as small as possible, or submit multiple pull re feature. Smaller means: easier to understand and review. This in turn means things can be merged faster. +## New Plugins + +A new plugin is (usually) about 1000 lines of Go. This includes tests and some plugin boiler +plate. This is a considerable amount of code and will take time to review. To prevent too much back and +forth it is advisable to start with the plugin's `README.md`; This will be it's main documentation and will help +nail down the correct name of the plugin and its various config options. + +From there it can work its way through the rest (`setup.go`, the `ServeDNS` handler function, +etc.). Doing this will help the reviewers, as each chunk of code is relatively small. + ## Updating Dependencies We use [Go Modules](https://github.com/golang/go/wiki/Modules) as the tool to manage vendor dependencies. From 4ecc9c4c430bfe607c6f6198f5df69b3babbbd17 Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Sun, 28 Jul 2019 10:02:52 +0000 Subject: [PATCH 042/324] release: coredns 1.6.0 release (#3058) * fix the notes and set release date to today * run a 'make -f Makefile.doc' to generate the manal pages. 
Signed-off-by: Miek Gieben --- man/coredns-any.7 | 2 +- man/coredns-autopath.7 | 2 +- man/coredns-cache.7 | 2 +- man/coredns-cancel.7 | 2 +- man/coredns-chaos.7 | 2 +- man/coredns-debug.7 | 2 +- man/coredns-erratic.7 | 2 +- man/coredns-file.7 | 6 +++++- man/coredns-forward.7 | 4 ++-- man/coredns-grpc.7 | 4 ++-- man/coredns-health.7 | 2 +- man/coredns-hosts.7 | 13 ++++++++----- man/coredns-kubernetes.7 | 1 - man/coredns-metrics.7 | 2 +- man/coredns-pprof.7 | 2 +- man/coredns-ready.7 | 2 +- man/coredns-rewrite.7 | 2 +- man/coredns-route53.7 | 9 +++++---- man/coredns-tls.7 | 2 +- notes/coredns-1.6.0.md | 35 ++++++++++++++++++++++++----------- 20 files changed, 59 insertions(+), 39 deletions(-) diff --git a/man/coredns-any.7 b/man/coredns-any.7 index 6d8f795da42..286849750ff 100644 --- a/man/coredns-any.7 +++ b/man/coredns-any.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-ANY" 7 "June 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-ANY" 7 "July 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-autopath.7 b/man/coredns-autopath.7 index 9150dce0264..8211407f2e0 100644 --- a/man/coredns-autopath.7 +++ b/man/coredns-autopath.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-AUTOPATH" 7 "June 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-AUTOPATH" 7 "July 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-cache.7 b/man/coredns-cache.7 index 4388d8beb22..daa773bf53f 100644 --- a/man/coredns-cache.7 +++ b/man/coredns-cache.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-CACHE" 7 "June 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-CACHE" 7 "July 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-cancel.7 b/man/coredns-cancel.7 index 2c6f095c38c..a15bc490c75 100644 --- a/man/coredns-cancel.7 +++ b/man/coredns-cancel.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.nl -.TH 
"COREDNS-CANCEL" 7 "June 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-CANCEL" 7 "July 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-chaos.7 b/man/coredns-chaos.7 index 31d2655a21a..e55f42a4dc2 100644 --- a/man/coredns-chaos.7 +++ b/man/coredns-chaos.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-CHAOS" 7 "June 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-CHAOS" 7 "July 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-debug.7 b/man/coredns-debug.7 index 7880e9fe4ac..ce1da734ce3 100644 --- a/man/coredns-debug.7 +++ b/man/coredns-debug.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-DEBUG" 7 "June 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-DEBUG" 7 "July 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-erratic.7 b/man/coredns-erratic.7 index 06be9b3d7a5..5c025b3bac9 100644 --- a/man/coredns-erratic.7 +++ b/man/coredns-erratic.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-ERRATIC" 7 "June 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-ERRATIC" 7 "July 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-file.7 b/man/coredns-file.7 index 8c39d79c051..5cc685b10e5 100644 --- a/man/coredns-file.7 +++ b/man/coredns-file.7 @@ -7,7 +7,7 @@ .SH "DESCRIPTION" .PP -The file plugin is used for an "old-style" DNS server. It serves from a preloaded file that exists +The \fIfile\fP plugin is used for an "old-style" DNS server. It serves from a preloaded file that exists on disk. If the zone file contains signatures (i.e., is signed using DNSSEC), correct DNSSEC answers are returned. Only NSEC is supported! If you use this setup \fIyou\fP are responsible for re-signing the zonefile. @@ -138,3 +138,7 @@ example.org { .fi .RE +.SH "ALSO SEE" +.PP +See the \fIloadbalance\fP plugin if you need simple record shuffling. 
+ diff --git a/man/coredns-forward.7 b/man/coredns-forward.7 index baf4e87b700..2180506c9b8 100644 --- a/man/coredns-forward.7 +++ b/man/coredns-forward.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-FORWARD" 7 "June 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-FORWARD" 7 "July 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP @@ -149,7 +149,7 @@ If monitoring is enabled (via the \fIprometheus\fP directive) then the following .IP \(bu 4 \fB\fCcoredns_forward_request_count_total{to}\fR - query count per upstream. .IP \(bu 4 -\fB\fCcoredns_forward_response_rcode_total{to, rcode}\fR - count of RCODEs per upstream. +\fB\fCcoredns_forward_response_rcode_count_total{to, rcode}\fR - count of RCODEs per upstream. .IP \(bu 4 \fB\fCcoredns_forward_healthcheck_failure_count_total{to}\fR - number of failed health checks per upstream. .IP \(bu 4 diff --git a/man/coredns-grpc.7 b/man/coredns-grpc.7 index 29d9a4a9eb7..17516d06f13 100644 --- a/man/coredns-grpc.7 +++ b/man/coredns-grpc.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-GRPC" 7 "June 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-GRPC" 7 "July 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP @@ -97,7 +97,7 @@ If monitoring is enabled (via the \fIprometheus\fP directive) then the following .IP \(bu 4 \fB\fCcoredns_grpc_request_count_total{to}\fR - query count per upstream. .IP \(bu 4 -\fB\fCcoredns_grpc_response_rcode_total{to, rcode}\fR - count of RCODEs per upstream. +\fB\fCcoredns_grpc_response_rcode_count_total{to, rcode}\fR - count of RCODEs per upstream. and we are randomly (this always uses the \fB\fCrandom\fR policy) spraying to an upstream. 
diff --git a/man/coredns-health.7 b/man/coredns-health.7 index 8a114089c97..ccede9d5f2b 100644 --- a/man/coredns-health.7 +++ b/man/coredns-health.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-HEALTH" 7 "June 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-HEALTH" 7 "July 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-hosts.7 b/man/coredns-hosts.7 index 93ad19d3bb9..e5c43dd4331 100644 --- a/man/coredns-hosts.7 +++ b/man/coredns-hosts.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-HOSTS" 7 "June 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-HOSTS" 7 "July 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP @@ -13,15 +13,17 @@ plugin only supports A, AAAA, and PTR records. The hosts plugin can be used with available hosts files that block access to advertising servers. .PP -The plugin reloads the content of the hosts file every 5 seconds. Upon reload, CoreDNS will use the new definitions. -Should the file be deleted, any inlined content will continue to be served. When the file is restored, it will then again be used. +The plugin reloads the content of the hosts file every 5 seconds. Upon reload, CoreDNS will use the +new definitions. Should the file be deleted, any inlined content will continue to be served. When +the file is restored, it will then again be used. .PP This plugin can only be used once per Server Block. .SH "THE HOSTS FILE" .PP -Commonly the entries are of the form \fB\fCIP_address canonical_hostname [aliases...]\fR as explained by the hosts(5) man page. +Commonly the entries are of the form \fB\fCIP_address canonical_hostname [aliases...]\fR as explained by +the hosts(5) man page. .PP Examples: @@ -41,7 +43,8 @@ fdfc:a744:27b5:3b0e::1 example.com example .SS "PTR RECORDS" .PP -PTR records for reverse lookups are generated automatically by CoreDNS (based on the hosts file entries) and cannot be created manually. 
+PTR records for reverse lookups are generated automatically by CoreDNS (based on the hosts file +entries) and cannot be created manually. .SH "SYNTAX" .PP diff --git a/man/coredns-kubernetes.7 b/man/coredns-kubernetes.7 index c430a6c128e..7b21e961cbd 100644 --- a/man/coredns-kubernetes.7 +++ b/man/coredns-kubernetes.7 @@ -64,7 +64,6 @@ kubernetes [ZONES...] { .fi .RE - .IP \(bu 4 \fB\fCendpoint\fR specifies the \fBURL\fP for a remote k8s API endpoint. If omitted, it will connect to k8s in-cluster using the cluster service account. diff --git a/man/coredns-metrics.7 b/man/coredns-metrics.7 index 057c60f99d7..31e029c90ea 100644 --- a/man/coredns-metrics.7 +++ b/man/coredns-metrics.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-METRICS" 7 "June 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-METRICS" 7 "July 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-pprof.7 b/man/coredns-pprof.7 index f5ef12858f8..b96ba86dba6 100644 --- a/man/coredns-pprof.7 +++ b/man/coredns-pprof.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-PPROF" 7 "June 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-PPROF" 7 "July 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-ready.7 b/man/coredns-ready.7 index e0d36358f6d..95c499bdedc 100644 --- a/man/coredns-ready.7 +++ b/man/coredns-ready.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-READY" 7 "June 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-READY" 7 "July 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-rewrite.7 b/man/coredns-rewrite.7 index d3adccce45e..a85a811717e 100644 --- a/man/coredns-rewrite.7 +++ b/man/coredns-rewrite.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-REWRITE" 7 "June 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-REWRITE" 7 "July 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git 
a/man/coredns-route53.7 b/man/coredns-route53.7 index 68e10a4ff08..943f1433f9e 100644 --- a/man/coredns-route53.7 +++ b/man/coredns-route53.7 @@ -19,7 +19,7 @@ The route53 plugin can be used when coredns is deployed on AWS or elsewhere. .nf route53 [ZONE:HOSTED\_ZONE\_ID...] { - [aws\_access\_key AWS\_ACCESS\_KEY\_ID AWS\_SECRET\_ACCESS\_KEY] + aws\_access\_key [AWS\_ACCESS\_KEY\_ID AWS\_SECRET\_ACCESS\_KEY] credentials PROFILE [FILENAME] fallthrough [ZONES...] } @@ -48,16 +48,17 @@ zone. \fBFILENAME\fP AWS credentials filename. Defaults to \fB\fC~/.aws/credentials\fR are used. .IP \(bu 4 \fB\fCfallthrough\fR If zone matches and no record can be generated, pass request to the next plugin. -If \fB[ZONES...]\fP is omitted, then fallthrough happens for all zones for which the plugin is +If \fBZONES\fP is omitted, then fallthrough happens for all zones for which the plugin is authoritative. If specific zones are listed (for example \fB\fCin-addr.arpa\fR and \fB\fCip6.arpa\fR), then only queries for those zones will be subject to fallthrough. .IP \(bu 4 -\fBZONES\fP zones it should be authoritative for. If empty, the zones from the configuration block +\fBZONES\fP zones it should be authoritative for. If empty, the zones from the configuration +block. 
.SH "EXAMPLES" .PP -Enable route53 with implicit AWS credentials and and resolve CNAMEs via 10.0.0.1: +Enable route53 with implicit AWS credentials and resolve CNAMEs via 10.0.0.1: .PP .RS diff --git a/man/coredns-tls.7 b/man/coredns-tls.7 index 5674afca65b..e06a3d6a16d 100644 --- a/man/coredns-tls.7 +++ b/man/coredns-tls.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-TLS" 7 "June 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-TLS" 7 "July 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/notes/coredns-1.6.0.md b/notes/coredns-1.6.0.md index 8ddf272bae9..f5414fccebd 100644 --- a/notes/coredns-1.6.0.md +++ b/notes/coredns-1.6.0.md @@ -3,7 +3,7 @@ title = "CoreDNS-1.6.0 Release" description = "CoreDNS-1.6.0 Release Notes." tags = ["Release", "1.6.0", "Notes"] release = "1.6.0" -date = 2019-07-03T07:35:47+01:00 +date = 2019-07-28T14:35:47+01:00 author = "coredns" +++ @@ -13,7 +13,7 @@ The CoreDNS team has released The `-cpu` flag is removed from this version. This release sports changes in the *file* plugin. A speed up in the *log* plugin and fixes in the -*cache* plugin. +*cache* and *hosts* plugins. Upcoming deprecation: the kubernetes *federation* plugin [will be moved to](https://github.com/coredns/coredns/issues/3041) github.com/coredns/federation. This is likely to @@ -26,24 +26,37 @@ happen in CoreDNS 1.7.0. * The [*cache*](/plugins/cache) fixes a race. * Multiple fixes in the [*route53*](/plugins/route53) plugin. * And the [*kubernetes*](/plugins/kubernetes) removes the `resyncperiod` option. +* The [*host*](/plugins/host) appended entries from /etc/hosts on each (re-)parse, instead of + overwriting them. +* Speed ups in the [*log*](/plugins/log) plugin. ## Brought to You By -* Wonderful people +Anshul Sharma, +Charlie Vieth, +Chris O'Haver, +Christian Muehlhaeuser, +Erfan Besharat, +Jintao Zhang, +Mat Lowery, +Miek Gieben, +Ruslan Drozhdzh, +Yong Tang. 
## Noteworthy Changes * core: Scrub: TC bit is always set (https://github.com/coredns/coredns/pull/3001) -* plugin/file: Simplify locking (https://github.com/coredns/coredns/pull/3024) -* plugin/file: New zone should have zero records (https://github.com/coredns/coredns/pull/3025) -* plugin/file: Rename do to walk and cleanup and document (https://github.com/coredns/coredns/pull/2987) +* pkg/cache: Fix race in Add() and Evict() (https://github.com/coredns/coredns/pull/3013) +* pkg/replacer: Evaluate format once and improve perf by ~3x (https://github.com/coredns/coredns/pull/3002) * plugin/file: Fix setting ReloadInterval (https://github.com/coredns/coredns/pull/3017) * plugin/file: Make non-existent file non-fatal (https://github.com/coredns/coredns/pull/2955) +* plugin/file: New zone should have zero records (https://github.com/coredns/coredns/pull/3025) +* plugin/file: Rename do to walk and cleanup and document (https://github.com/coredns/coredns/pull/2987) +* plugin/file: Simplify locking (https://github.com/coredns/coredns/pull/3024) +* plugin/host: don't append the names when reparsing hosts file (https://github.com/coredns/coredns/pull/3045) +* plugin/kubernetes: Remove resyncperiod (https://github.com/coredns/coredns/pull/2923) +* plugin/log: Fix log plugin benchmark and slightly improve performance (https://github.com/coredns/coredns/pull/3004) * plugin/metrics: Fix response_rcode_count_total metric (https://github.com/coredns/coredns/pull/3029) -* pkg/cache: Fix race in Add() and Evict() (https://github.com/coredns/coredns/pull/3013) +* plugin/rewrite: Fix domain length validation (https://github.com/coredns/coredns/pull/2995) * plugin/route53: Fix IAM credential file (https://github.com/coredns/coredns/pull/2983) * plugin/route53: Fix multiple credentials in route53 
(https://github.com/coredns/coredns/pull/2859) -* pkg/replacer: Evaluate format once and improve perf by ~3x (https://github.com/coredns/coredns/pull/3002) -* plugin/log: Fix log plugin benchmark and slightly improve performance (https://github.com/coredns/coredns/pull/3004) -* plugin/rewrite: Fix domain length validation (https://github.com/coredns/coredns/pull/2995) -* plugin/kubernetes: Remove resyncperiod (https://github.com/coredns/coredns/pull/2923) From 5a2a422b38b5c446b7dea959befdb83e0de9db31 Mon Sep 17 00:00:00 2001 From: Michael Grosser Date: Sun, 28 Jul 2019 10:03:46 +0000 Subject: [PATCH 043/324] Fix typo --- CONTRIBUTING.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 5cfccf49e3a..8774238a07b 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -46,7 +46,7 @@ faster. A new plugin is (usually) about 1000 lines of Go. This includes tests and some plugin boiler plate. This is a considerable amount of code and will take time to review. To prevent too much back and -forth it is advisable to start with the plugin's `README.md`; This will be it's main documentation and will help +forth it is advisable to start with the plugin's `README.md`; This will be its main documentation and will help nail down the correct name of the plugin and its various config options. From there it can work its way through the rest (`setup.go`, the `ServeDNS` handler function, From 0a218d3cf4aeab36a7931924aaf22426370fa392 Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Sun, 28 Jul 2019 20:00:28 +0000 Subject: [PATCH 044/324] Tag v1.6.0 (#3059) Signed-off-by: Miek Gieben --- coremain/version.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/coremain/version.go b/coremain/version.go index 9c622c58b20..182af29afa4 100644 --- a/coremain/version.go +++ b/coremain/version.go @@ -2,7 +2,7 @@ package coremain // Various CoreDNS constants. 
const ( - CoreVersion = "1.5.2" + CoreVersion = "1.6.0" coreName = "CoreDNS" serverType = "dns" ) From 79f352d2b3e6bd8ce3988c480b0c11463be08cc3 Mon Sep 17 00:00:00 2001 From: AllenZMC Date: Mon, 29 Jul 2019 21:35:29 +0800 Subject: [PATCH 045/324] fix typos in coredns-1.5.2.md (#3063) --- notes/coredns-1.5.2.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/notes/coredns-1.5.2.md b/notes/coredns-1.5.2.md index 7e39588500e..5e5c3ff253a 100644 --- a/notes/coredns-1.5.2.md +++ b/notes/coredns-1.5.2.md @@ -32,7 +32,7 @@ Yong Tang. ## Noteworthy Changes -* plugin/file: close correctlty after AXFR (https://github.com/coredns/coredns/pull/2943) +* plugin/file: close correctly after AXFR (https://github.com/coredns/coredns/pull/2943) * plugin/file: load secondary zones lazily on startup (https://github.com/coredns/coredns/pull/2944) * plugin/template: support metadata (https://github.com/coredns/coredns/pull/2958) * build: Update Caddy to 1.0.1, and update import path (https://github.com/coredns/coredns/pull/2961) From f50e1dc912c79caf1c2fdcb8b156e75a9052a59c Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 29 Jul 2019 06:55:23 -0700 Subject: [PATCH 046/324] build(deps): bump google.golang.org/grpc from 1.22.0 to 1.22.1 (#3062) Bumps [google.golang.org/grpc](https://github.com/grpc/grpc-go) from 1.22.0 to 1.22.1. 
- [Release notes](https://github.com/grpc/grpc-go/releases) - [Commits](https://github.com/grpc/grpc-go/compare/v1.22.0...v1.22.1) Signed-off-by: dependabot-preview[bot] --- go.mod | 2 +- go.sum | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index 203777f02cd..4c4c12f2242 100644 --- a/go.mod +++ b/go.mod @@ -55,7 +55,7 @@ require ( golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 // indirect google.golang.org/genproto v0.0.0-20190701230453-710ae3a149df // indirect - google.golang.org/grpc v1.22.0 + google.golang.org/grpc v1.22.1 gopkg.in/DataDog/dd-trace-go.v1 v1.16.0 gopkg.in/inf.v0 v0.9.1 // indirect k8s.io/api v0.0.0-20190620084959-7cf5895f2711 diff --git a/go.sum b/go.sum index 5d054cb1c3d..7d27ba97b44 100644 --- a/go.sum +++ b/go.sum @@ -362,6 +362,8 @@ google.golang.org/grpc v1.21.1 h1:j6XxA85m/6txkUCHvzlV5f+HBNl/1r5cZ2A/3IEFOO8= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.22.0 h1:J0UbZOIrCAl+fpTOf8YLs4dJo8L/owV4LYVtAXQoPkw= google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.22.1 h1:/7cs52RnTJmD43s3uxzlq2U7nqVTd/37viQwMrMNlOM= +google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= gopkg.in/DataDog/dd-trace-go.v1 v1.16.0 h1:M+pIoj6uFByW3E8KaAaBmsLl3Sma3JzXZ9eo7ffyX5A= gopkg.in/DataDog/dd-trace-go.v1 v1.16.0/go.mod h1:DVp8HmDh8PuTu2Z0fVVlBsyWaC++fzwVCaGWylTe3tg= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= From c3516279ed95b77a526a4268ea0e8c08c914bc2f Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 29 Jul 2019 06:55:37 -0700 Subject: [PATCH 047/324] build(deps): bump github.com/aws/aws-sdk-go from 1.21.2 to 1.21.6 (#3060) Bumps 
[github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.21.2 to 1.21.6. - [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Changelog](https://github.com/aws/aws-sdk-go/blob/master/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.21.2...v1.21.6) Signed-off-by: dependabot-preview[bot] --- go.mod | 2 +- go.sum | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index 4c4c12f2242..60425c1b745 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,7 @@ require ( cloud.google.com/go v0.41.0 // indirect github.com/Shopify/sarama v1.21.0 // indirect github.com/apache/thrift v0.12.0 // indirect - github.com/aws/aws-sdk-go v1.21.2 + github.com/aws/aws-sdk-go v1.21.6 github.com/caddyserver/caddy v1.0.1 github.com/coreos/bbolt v1.3.2 // indirect github.com/coreos/etcd v3.3.13+incompatible diff --git a/go.sum b/go.sum index 7d27ba97b44..e1b65fa0f2e 100644 --- a/go.sum +++ b/go.sum @@ -18,6 +18,8 @@ github.com/apache/thrift v0.12.0 h1:pODnxUFNcjP9UTLZGTdeh+j16A8lJbRvD3rOtrk/7bs= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/aws/aws-sdk-go v1.21.2 h1:CqbWrQzi7s8J2F0TRRdLvTr0+bt5Zxo2IDoFNGsAiUg= github.com/aws/aws-sdk-go v1.21.2/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.21.6 h1:3GuIm55Uls52aQIDGBnSEZbk073jpasfQyeM5eZU61Q= +github.com/aws/aws-sdk-go v1.21.6/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= From 7122fda09d508ac4a0962a97667c3ddef6892e6c Mon Sep 17 00:00:00 
2001 From: dzzg Date: Mon, 29 Jul 2019 22:58:45 +0800 Subject: [PATCH 048/324] cleanup: error message typos in setup_test.go (#3065) Signed-off-by: zhengguang zhu --- plugin/etcd/setup_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugin/etcd/setup_test.go b/plugin/etcd/setup_test.go index 20379628b01..8a84d0fd105 100644 --- a/plugin/etcd/setup_test.go +++ b/plugin/etcd/setup_test.go @@ -105,12 +105,12 @@ func TestSetupEtcd(t *testing.T) { if !test.shouldErr { if test.username != "" { if etcd.Client.Username != test.username { - t.Errorf("Etcd username not correctly set for input %s. Excpeted: '%+v', actual: '%+v'", test.input, test.username, etcd.Client.Username) + t.Errorf("Etcd username not correctly set for input %s. Expected: '%+v', actual: '%+v'", test.input, test.username, etcd.Client.Username) } } if test.password != "" { if etcd.Client.Password != test.password { - t.Errorf("Etcd password not correctly set for input %s. Excpeted: '%+v', actual: '%+v'", test.input, test.password, etcd.Client.Password) + t.Errorf("Etcd password not correctly set for input %s. Expected: '%+v', actual: '%+v'", test.input, test.password, etcd.Client.Password) } } } From 6cea8869cf0f4c8930388f6b1cf270ce55095c86 Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 29 Jul 2019 08:10:42 -0700 Subject: [PATCH 049/324] build(deps): bump gopkg.in/DataDog/dd-trace-go.v1 from 1.16.0 to 1.16.1 (#3061) Bumps [gopkg.in/DataDog/dd-trace-go.v1](https://github.com/DataDog/dd-trace-go) from 1.16.0 to 1.16.1. 
- [Release notes](https://github.com/DataDog/dd-trace-go/releases) - [Commits](https://github.com/DataDog/dd-trace-go/compare/v1.16.0...v1.16.1) Signed-off-by: dependabot-preview[bot] --- go.mod | 2 +- go.sum | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index 60425c1b745..7be99ea197d 100644 --- a/go.mod +++ b/go.mod @@ -56,7 +56,7 @@ require ( golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 // indirect google.golang.org/genproto v0.0.0-20190701230453-710ae3a149df // indirect google.golang.org/grpc v1.22.1 - gopkg.in/DataDog/dd-trace-go.v1 v1.16.0 + gopkg.in/DataDog/dd-trace-go.v1 v1.16.1 gopkg.in/inf.v0 v0.9.1 // indirect k8s.io/api v0.0.0-20190620084959-7cf5895f2711 k8s.io/apimachinery v0.0.0-20190612205821-1799e75a0719 diff --git a/go.sum b/go.sum index e1b65fa0f2e..cf7f69d9917 100644 --- a/go.sum +++ b/go.sum @@ -368,6 +368,8 @@ google.golang.org/grpc v1.22.1 h1:/7cs52RnTJmD43s3uxzlq2U7nqVTd/37viQwMrMNlOM= google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= gopkg.in/DataDog/dd-trace-go.v1 v1.16.0 h1:M+pIoj6uFByW3E8KaAaBmsLl3Sma3JzXZ9eo7ffyX5A= gopkg.in/DataDog/dd-trace-go.v1 v1.16.0/go.mod h1:DVp8HmDh8PuTu2Z0fVVlBsyWaC++fzwVCaGWylTe3tg= +gopkg.in/DataDog/dd-trace-go.v1 v1.16.1 h1:Dngw1zun6yTYFHNdzEWBlrJzFA2QJMjSA2sZ4nH2UWo= +gopkg.in/DataDog/dd-trace-go.v1 v1.16.1/go.mod h1:DVp8HmDh8PuTu2Z0fVVlBsyWaC++fzwVCaGWylTe3tg= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= From 4fda9535d2f446de9dbaf8b397b0d871bdc882e5 Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Tue, 30 Jul 2019 16:35:07 +0000 Subject: [PATCH 050/324] plugin/kubernetes: remove some of the klog setup (#3054) I don't believe this is actually needed 
(anymore). The: os.Stderr = os.Stdout is a crazy hack that def. needs to go. Signed-off-by: Miek Gieben --- plugin/kubernetes/setup.go | 14 +------------- 1 file changed, 1 insertion(+), 13 deletions(-) diff --git a/plugin/kubernetes/setup.go b/plugin/kubernetes/setup.go index d97bc9139cb..1b0f2076994 100644 --- a/plugin/kubernetes/setup.go +++ b/plugin/kubernetes/setup.go @@ -2,7 +2,6 @@ package kubernetes import ( "errors" - "flag" "fmt" "os" "strconv" @@ -36,15 +35,7 @@ import ( var log = clog.NewWithPlugin("kubernetes") func init() { - // Kubernetes plugin uses the kubernetes library, which now uses klog, we must set and parse this flag - // so we don't log to the filesystem, which can fill up and crash CoreDNS indirectly by calling os.Exit(). - // We also set: os.Stderr = os.Stdout in the setup function below so we output to standard out; as we do for - // all CoreDNS logging. We can't do *that* in the init function, because we, when starting, also barf some - // things to stderr. - klogFlags := flag.NewFlagSet("klog", flag.ExitOnError) - klog.InitFlags(klogFlags) - logtostderr := klogFlags.Lookup("logtostderr") - logtostderr.Value.Set("true") + klog.SetOutput(os.Stdout) caddy.RegisterPlugin("kubernetes", caddy.Plugin{ ServerType: "dns", @@ -53,9 +44,6 @@ func init() { } func setup(c *caddy.Controller) error { - // See comment in the init function. - os.Stderr = os.Stdout - k, err := kubernetesParse(c) if err != nil { return plugin.Error("kubernetes", err) From 367d285765fef0f030be1b59e5d6bf5d5a008e45 Mon Sep 17 00:00:00 2001 From: Erik Wilson Date: Tue, 30 Jul 2019 15:44:16 -0700 Subject: [PATCH 051/324] plugin/reload: Graceful reload of imported files (#3068) Automatically submitted. 
--- plugin/reload/README.md | 5 +++-- plugin/reload/reload.go | 25 +++++++++++++++++++++++-- 2 files changed, 26 insertions(+), 4 deletions(-) diff --git a/plugin/reload/README.md b/plugin/reload/README.md index c1f5d206b2f..89a49f14ae3 100644 --- a/plugin/reload/README.md +++ b/plugin/reload/README.md @@ -89,8 +89,9 @@ closed in step 1; so the health endpoint is broken. The same can hopen in the pr In general be careful with assigning new port and expecting reload to work fully. -Also any `import` statement is not discovered by this plugin. This means if any of these imported files -changes the *reload* plugin is ignorant of that fact. +In CoreDNS v1.6.0 and earlier any `import` statements are not discovered by this plugin. +This means if any of these imported files changes the *reload* plugin is ignorant of that fact. +CoreDNS v1.7.0 and later does parse the Corefile and supports detecting changes in imported files. ## Metrics diff --git a/plugin/reload/reload.go b/plugin/reload/reload.go index f6e676d880c..59e040479b9 100644 --- a/plugin/reload/reload.go +++ b/plugin/reload/reload.go @@ -1,11 +1,14 @@ package reload import ( + "bytes" "crypto/md5" + "encoding/json" "sync" "time" "github.com/caddyserver/caddy" + "github.com/caddyserver/caddy/caddyfile" ) // reload periodically checks if the Corefile has changed, and reloads if so @@ -46,6 +49,14 @@ func (r *reload) interval() time.Duration { return r.dur } +func parse(corefile caddy.Input) ([]byte, error) { + serverBlocks, err := caddyfile.Parse(corefile.Path(), bytes.NewReader(corefile.Body()), nil) + if err != nil { + return nil, err + } + return json.Marshal(serverBlocks) +} + func hook(event caddy.EventName, info interface{}) error { if event != caddy.InstanceStartupEvent { return nil @@ -60,7 +71,12 @@ func hook(event caddy.EventName, info interface{}) error { // this should be an instance. 
ok to panic if not instance := info.(*caddy.Instance) - md5sum := md5.Sum(instance.Caddyfile().Body()) + parsedCorefile, err := parse(instance.Caddyfile()) + if err != nil { + return err + } + + md5sum := md5.Sum(parsedCorefile) log.Infof("Running configuration MD5 = %x\n", md5sum) go func() { @@ -73,7 +89,12 @@ func hook(event caddy.EventName, info interface{}) error { if err != nil { continue } - s := md5.Sum(corefile.Body()) + parsedCorefile, err := parse(corefile) + if err != nil { + log.Warningf("Corefile parse failed: %s", err) + continue + } + s := md5.Sum(parsedCorefile) if s != md5sum { // Let not try to restart with the same file, even though it is wrong. md5sum = s From b8f40a81509af02b24a7f1eac6489486929b523d Mon Sep 17 00:00:00 2001 From: Yong Tang Date: Tue, 30 Jul 2019 23:44:56 -0700 Subject: [PATCH 052/324] Move github.com/openzipkin/zipkin-go-opentracing => github.com/openzipkin-contrib/zipkin-go-opentracing (#3070) Move github.com/openzipkin/zipkin-go-opentracing => github.com/openzipkin-contrib/zipkin-go-opentracing and run `go mod tidy` Fixes 3069 Signed-off-by: Yong Tang --- go.mod | 3 +-- go.sum | 8 -------- plugin/trace/trace.go | 2 +- 3 files changed, 2 insertions(+), 11 deletions(-) diff --git a/go.mod b/go.mod index 7be99ea197d..e77aead100e 100644 --- a/go.mod +++ b/go.mod @@ -33,8 +33,7 @@ require ( github.com/miekg/dns v1.1.15 github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492 // indirect github.com/opentracing/opentracing-go v1.1.0 - github.com/openzipkin-contrib/zipkin-go-opentracing v0.3.5 // indirect - github.com/openzipkin/zipkin-go-opentracing v0.3.5 + github.com/openzipkin-contrib/zipkin-go-opentracing v0.3.5 github.com/philhofer/fwd v1.0.0 // indirect github.com/pkg/errors v0.8.1 // indirect github.com/prometheus/client_golang v1.0.0 diff --git a/go.sum b/go.sum index cf7f69d9917..4d62d6179c6 100644 
--- a/go.sum +++ b/go.sum @@ -16,8 +16,6 @@ github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuy github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/apache/thrift v0.12.0 h1:pODnxUFNcjP9UTLZGTdeh+j16A8lJbRvD3rOtrk/7bs= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= -github.com/aws/aws-sdk-go v1.21.2 h1:CqbWrQzi7s8J2F0TRRdLvTr0+bt5Zxo2IDoFNGsAiUg= -github.com/aws/aws-sdk-go v1.21.2/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.21.6 h1:3GuIm55Uls52aQIDGBnSEZbk073jpasfQyeM5eZU61Q= github.com/aws/aws-sdk-go v1.21.6/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= @@ -194,8 +192,6 @@ github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsq github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/openzipkin-contrib/zipkin-go-opentracing v0.3.5 h1:82Tnq9OJpn+h5xgGpss5/mOv3KXdjtkdorFSOUusjM8= github.com/openzipkin-contrib/zipkin-go-opentracing v0.3.5/go.mod h1:uVHyebswE1cCXr2A73cRM2frx5ld1RJUCJkFNZ90ZiI= -github.com/openzipkin/zipkin-go-opentracing v0.3.5 h1:nZPvd2EmRKP+NzFdSuxZF/FG4Y4W2gn6ugXliTAu9o0= -github.com/openzipkin/zipkin-go-opentracing v0.3.5/go.mod h1:js2AbwmHW0YD9DwIw2JhQWmbfFi/UnWyYwdVhqbCDOE= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/philhofer/fwd v1.0.0 h1:UbZqGr5Y38ApvM/V/jEljVxwocdweyH+vmYvRPBnbqQ= github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= @@ -362,12 +358,8 @@ google.golang.org/grpc v1.19.0/go.mod 
h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZi google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1 h1:j6XxA85m/6txkUCHvzlV5f+HBNl/1r5cZ2A/3IEFOO8= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.22.0 h1:J0UbZOIrCAl+fpTOf8YLs4dJo8L/owV4LYVtAXQoPkw= -google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.22.1 h1:/7cs52RnTJmD43s3uxzlq2U7nqVTd/37viQwMrMNlOM= google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -gopkg.in/DataDog/dd-trace-go.v1 v1.16.0 h1:M+pIoj6uFByW3E8KaAaBmsLl3Sma3JzXZ9eo7ffyX5A= -gopkg.in/DataDog/dd-trace-go.v1 v1.16.0/go.mod h1:DVp8HmDh8PuTu2Z0fVVlBsyWaC++fzwVCaGWylTe3tg= gopkg.in/DataDog/dd-trace-go.v1 v1.16.1 h1:Dngw1zun6yTYFHNdzEWBlrJzFA2QJMjSA2sZ4nH2UWo= gopkg.in/DataDog/dd-trace-go.v1 v1.16.1/go.mod h1:DVp8HmDh8PuTu2Z0fVVlBsyWaC++fzwVCaGWylTe3tg= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= diff --git a/plugin/trace/trace.go b/plugin/trace/trace.go index 90b4632e828..5a95186958c 100644 --- a/plugin/trace/trace.go +++ b/plugin/trace/trace.go @@ -20,7 +20,7 @@ import ( "github.com/miekg/dns" ot "github.com/opentracing/opentracing-go" - zipkin "github.com/openzipkin/zipkin-go-opentracing" + zipkin "github.com/openzipkin-contrib/zipkin-go-opentracing" ) const ( From 7be2226eab684c18b74c5c28db8de9335560ffc3 Mon Sep 17 00:00:00 2001 From: Alan Date: Wed, 31 Jul 2019 14:47:46 +0800 Subject: [PATCH 053/324] fix ineffectual assignment to err (#3066) Signed-off-by: alan --- owners_generate.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/owners_generate.go b/owners_generate.go index 9764179a0be..162cf572292 100644 --- a/owners_generate.go +++ b/owners_generate.go @@ -40,7 +40,10 @@ func main() { o, err = owners(p, o) return err }) - + if 
err != nil { + log.Fatal(err) + } + // sort it and format it list := []string{} for k := range o { From cd5dcebe9352b6a08687cfdf3ac8115bd7c8dfcc Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Wed, 31 Jul 2019 18:18:49 +0000 Subject: [PATCH 054/324] core: log panics (#3072) These are too hidden now. They increase the issue-load, because people don't see them. Add log.Errorf in the core/dnsserver recover routine. Signed-off-by: Miek Gieben --- core/dnsserver/server.go | 1 + 1 file changed, 1 insertion(+) diff --git a/core/dnsserver/server.go b/core/dnsserver/server.go index 55895430414..4a143f9b3e4 100644 --- a/core/dnsserver/server.go +++ b/core/dnsserver/server.go @@ -200,6 +200,7 @@ func (s *Server) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) // In case the user doesn't enable error plugin, we still // need to make sure that we stay alive up here if rec := recover(); rec != nil { + log.Errorf("Recovered from panic in server: %q", s.Addr) vars.Panic.Inc() errorAndMetricsFunc(s.Addr, w, r, dns.RcodeServerFailure) } From 45e17c325c16760374455157795d375709ee72ab Mon Sep 17 00:00:00 2001 From: Matt Kulka Date: Wed, 31 Jul 2019 12:09:10 -0700 Subject: [PATCH 055/324] [plugin/route53]: Increase ListResourceRecordSets paging size. (#3073) without the paging parameter set, it will default to 100 records per request. with large enough zones and potentially multiple coredns daemons configured to pull from route 53, this can quickly add up and reach aws global api rate limits (5/sec per root account). increasing paging to max can help reduce the number of requests needed to pull records for a zone without no down side that i am aware of. this helps issue #2353, but probably is not a complete fix. 
Signed-off-by: Matt Kulka --- plugin/route53/route53.go | 1 + 1 file changed, 1 insertion(+) diff --git a/plugin/route53/route53.go b/plugin/route53/route53.go index 02d0b8230c4..0f7c2b1d266 100644 --- a/plugin/route53/route53.go +++ b/plugin/route53/route53.go @@ -248,6 +248,7 @@ func (h *Route53) updateZones(ctx context.Context) error { newZ.Upstream = h.upstream in := &route53.ListResourceRecordSetsInput{ HostedZoneId: aws.String(hostedZone.id), + MaxItems: aws.String("1000"), } err = h.client.ListResourceRecordSetsPagesWithContext(ctx, in, func(out *route53.ListResourceRecordSetsOutput, last bool) bool { From 07f016191d5d42e56f64408751fdb48057c5b930 Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Wed, 31 Jul 2019 19:37:09 +0000 Subject: [PATCH 056/324] plugin/hosts: create inline map in setup (#3071) * plugin/hosts: create inline map in setup The inline map wasn't create in the setup.go fuction leading to a crash, which is masked by a recover (but leads to a SERVFAIL, and not logging the request). Various other simplifications. host plugin could use some test that actually cover these edgecases. Signed-off-by: Miek Gieben * PR review changes Signed-off-by: Miek Gieben --- plugin/hosts/README.md | 7 ++++++- plugin/hosts/hosts.go | 33 ++++++++++++--------------------- plugin/hosts/setup.go | 11 +++++------ 3 files changed, 23 insertions(+), 28 deletions(-) diff --git a/plugin/hosts/README.md b/plugin/hosts/README.md index 60135e71ab3..b18438cc6b6 100644 --- a/plugin/hosts/README.md +++ b/plugin/hosts/README.md @@ -15,6 +15,9 @@ The plugin reloads the content of the hosts file every 5 seconds. Upon reload, C new definitions. Should the file be deleted, any inlined content will continue to be served. When the file is restored, it will then again be used. +If you want to pass the request to the rest of the plugin chain if there is no match in the *hosts* +plugin, you must specify the `fallthrough` option. + This plugin can only be used once per Server Block. 
## The hosts file @@ -60,7 +63,9 @@ hosts [FILE [ZONES...]] { then all of them will be treated as the additional content for hosts file. The specified hosts file path will still be read but entries will be overridden. * `ttl` change the DNS TTL of the records generated (forward and reverse). The default is 3600 seconds (1 hour). -* `reload` change the period between each hostsfile reload. A time of zero seconds disable the feature. Examples of valid durations: "300ms", "1.5h" or "2h45m" are valid duration with units "ns" (nanosecond), "us" (or "µs" for microsecond), "ms" (millisecond), "s" (second), "m" (minute), "h" (hour). +* `reload` change the period between each hostsfile reload. A time of zero seconds disables the + feature. Examples of valid durations: "300ms", "1.5h" or "2h45m". See Go's + [time](https://godoc.org/time). package. * `no_reverse` disable the automatic generation of the `in-addr.arpa` or `ip6.arpa` entries for the hosts * `fallthrough` If zone matches and no record can be generated, pass request to the next plugin. If **[ZONES...]** is omitted, then fallthrough happens for all zones for which the plugin diff --git a/plugin/hosts/hosts.go b/plugin/hosts/hosts.go index 54b090b4d3c..a94c65a7e1e 100644 --- a/plugin/hosts/hosts.go +++ b/plugin/hosts/hosts.go @@ -29,8 +29,8 @@ func (h Hosts) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) ( zone := plugin.Zones(h.Origins).Matches(qname) if zone == "" { - // PTR zones don't need to be specified in Origins - if state.Type() != "PTR" { + // PTR zones don't need to be specified in Origins. 
+ if state.QType() != dns.TypePTR { // if this doesn't match we need to fall through regardless of h.Fallthrough return plugin.NextOrFailure(h.Name(), h.Next, ctx, w, r) } @@ -56,8 +56,10 @@ func (h Hosts) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) ( if h.Fall.Through(qname) { return plugin.NextOrFailure(h.Name(), h.Next, ctx, w, r) } - if !h.otherRecordsExist(state.QType(), qname) { - return dns.RcodeNameError, nil + // We want to send an NXDOMAIN, but because of /etc/hosts' setup we don't have a SOA, so we make it REFUSED + // to at least give an answer back to signals we're having problems resolving this. + if !h.otherRecordsExist(qname) { + return dns.RcodeServerFailure, nil } } @@ -70,23 +72,12 @@ func (h Hosts) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) ( return dns.RcodeSuccess, nil } -func (h Hosts) otherRecordsExist(qtype uint16, qname string) bool { - switch qtype { - case dns.TypeA: - if len(h.LookupStaticHostV6(qname)) > 0 { - return true - } - case dns.TypeAAAA: - if len(h.LookupStaticHostV4(qname)) > 0 { - return true - } - default: - if len(h.LookupStaticHostV4(qname)) > 0 { - return true - } - if len(h.LookupStaticHostV6(qname)) > 0 { - return true - } +func (h Hosts) otherRecordsExist(qname string) bool { + if len(h.LookupStaticHostV4(qname)) > 0 { + return true + } + if len(h.LookupStaticHostV6(qname)) > 0 { + return true } return false } diff --git a/plugin/hosts/setup.go b/plugin/hosts/setup.go index 73fd3cec4ae..981ea141f36 100644 --- a/plugin/hosts/setup.go +++ b/plugin/hosts/setup.go @@ -73,13 +73,12 @@ func setup(c *caddy.Controller) error { func hostsParse(c *caddy.Controller) (Hosts, error) { config := dnsserver.GetConfig(c) - options := newOptions() - h := Hosts{ Hostsfile: &Hostsfile{ path: "/etc/hosts", hmap: newMap(), - options: options, + inline: newMap(), + options: newOptions(), }, } @@ -129,7 +128,7 @@ func hostsParse(c *caddy.Controller) (Hosts, error) { case "fallthrough": 
h.Fall.SetZonesFromArgs(c.RemainingArgs()) case "no_reverse": - options.autoReverse = false + h.options.autoReverse = false case "ttl": remaining := c.RemainingArgs() if len(remaining) < 1 { @@ -142,7 +141,7 @@ func hostsParse(c *caddy.Controller) (Hosts, error) { if ttl <= 0 || ttl > 65535 { return h, c.Errf("ttl provided is invalid") } - options.ttl = uint32(ttl) + h.options.ttl = uint32(ttl) case "reload": remaining := c.RemainingArgs() if len(remaining) != 1 { @@ -155,7 +154,7 @@ func hostsParse(c *caddy.Controller) (Hosts, error) { if reload < 0 { return h, c.Errf("invalid negative duration for reload '%s'", remaining[0]) } - options.reload = reload + h.options.reload = reload default: if len(h.Fall.Zones) == 0 { line := strings.Join(append([]string{c.Val()}, c.RemainingArgs()...), " ") From 3219a2b93a8010af9b5c2263c658815a4494416e Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Thu, 1 Aug 2019 12:51:14 +0000 Subject: [PATCH 057/324] Release notes for 1.6.1 (#3080) Add the new (short) notes for the 1.6.1 release. Signed-off-by: Miek Gieben --- notes/coredns-1.6.1.md | 37 +++++++++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) create mode 100644 notes/coredns-1.6.1.md diff --git a/notes/coredns-1.6.1.md b/notes/coredns-1.6.1.md new file mode 100644 index 00000000000..76db2d11759 --- /dev/null +++ b/notes/coredns-1.6.1.md @@ -0,0 +1,37 @@ ++++ +title = "CoreDNS-1.6.1 Release" +description = "CoreDNS-1.6.1 Release Notes." +tags = ["Release", "1.6.1", "Notes"] +release = "1.6.1" +date = 2019-08-02T14:35:47+01:00 +author = "coredns" ++++ + +The CoreDNS team has released +[CoreDNS-1.6.1](https://github.com/coredns/coredns/releases/tag/v1.6.1). + +This is a small (bug fix) release. + +# Plugins + +* Fix a panic in the [*hosts*](/plugins/hosts) plugin. +* The [*reload*](/plugins/reload) now detects changes in files imported from the main Corefile. 
+* [*route53*](/plugins/route53) increases the paging size when talking to the AWS API, this + decreases the chances of getting throttled. + +## Brought to You By + +Alan, +AllenZMC, +dzzg, +Erik Wilson, +Matt Kulka, +Miek Gieben, +Yong Tang. + +## Noteworthy Changes + +core: log panics (https://github.com/coredns/coredns/pull/3072) +plugin/hosts: create inline map in setup (https://github.com/coredns/coredns/pull/3071) +plugin/reload: Graceful reload of imported files (https://github.com/coredns/coredns/pull/3068) +plugin/route53: Increase ListResourceRecordSets paging size. (https://github.com/coredns/coredns/pull/3073) From a01b202b6ace52148dcfdfe8bc723878e0f911f8 Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Thu, 1 Aug 2019 12:51:37 +0000 Subject: [PATCH 058/324] Fixes races in test and klog (#3079) Various fixes to make things less flaky: * kubernetes: put klog.SetOutput in the setup function, not in the init function to see if that helps * file: make z.Expired a boolean instead of a pointer to a boolean * test: fix TestSecondaryZoneTransfer test, which wasn't actually testing in the right way. 
It's more right now, but may still be racy (race introduced because a file's lazy loading of zones) Signed-off-by: Miek Gieben --- plugin/file/file.go | 2 +- plugin/file/secondary.go | 4 ++-- plugin/file/secondary_test.go | 2 -- plugin/file/zone.go | 7 ++----- plugin/kubernetes/setup.go | 6 +++--- test/secondary_test.go | 6 +++--- 6 files changed, 11 insertions(+), 16 deletions(-) diff --git a/plugin/file/file.go b/plugin/file/file.go index 8e10499fa68..1aebf6cda0d 100644 --- a/plugin/file/file.go +++ b/plugin/file/file.go @@ -72,7 +72,7 @@ func (f File) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (i z.RLock() exp := z.Expired z.RUnlock() - if exp != nil && *exp { + if exp { log.Errorf("Zone %s is expired", zone) return dns.RcodeServerFailure, nil } diff --git a/plugin/file/secondary.go b/plugin/file/secondary.go index 408ee887a32..6cb571fe54b 100644 --- a/plugin/file/secondary.go +++ b/plugin/file/secondary.go @@ -54,7 +54,7 @@ Transfer: z.Lock() z.Tree = z1.Tree z.Apex = z1.Apex - *z.Expired = false + z.Expired = false z.Unlock() log.Infof("Transferred: %s from %s", z.origin, tr) return nil @@ -129,7 +129,7 @@ Restart: if !retryActive { break } - *z.Expired = true + z.Expired = true case <-retryTicker.C: if !retryActive { diff --git a/plugin/file/secondary_test.go b/plugin/file/secondary_test.go index db98b4f97a6..5c393e4400f 100644 --- a/plugin/file/secondary_test.go +++ b/plugin/file/secondary_test.go @@ -125,7 +125,6 @@ func TestTransferIn(t *testing.T) { defer s.Shutdown() z := new(Zone) - z.Expired = new(bool) z.origin = testZone z.TransferFrom = []string{addrstr} @@ -140,7 +139,6 @@ func TestTransferIn(t *testing.T) { func TestIsNotify(t *testing.T) { z := new(Zone) - z.Expired = new(bool) z.origin = testZone state := newRequest(testZone, dns.TypeSOA) // need to set opcode diff --git a/plugin/file/zone.go b/plugin/file/zone.go index d3adca29e25..9a12ca69f65 100644 --- a/plugin/file/zone.go +++ b/plugin/file/zone.go @@ -22,7 +22,7 @@ type 
Zone struct { file string *tree.Tree Apex - Expired *bool + Expired bool sync.RWMutex @@ -46,16 +46,13 @@ type Apex struct { // NewZone returns a new zone. func NewZone(name, file string) *Zone { - z := &Zone{ + return &Zone{ origin: dns.Fqdn(name), origLen: dns.CountLabel(dns.Fqdn(name)), file: filepath.Clean(file), Tree: &tree.Tree{}, - Expired: new(bool), reloadShutdown: make(chan bool), } - *z.Expired = false - return z } // Copy copies a zone. diff --git a/plugin/kubernetes/setup.go b/plugin/kubernetes/setup.go index 1b0f2076994..00c69818111 100644 --- a/plugin/kubernetes/setup.go +++ b/plugin/kubernetes/setup.go @@ -19,7 +19,7 @@ import ( "github.com/miekg/dns" meta "k8s.io/apimachinery/pkg/apis/meta/v1" - // Pull this in for logtostderr flag parsing + // Pull this in setting klog's output to stdout "k8s.io/klog" // Excluding azure because it is failing to compile @@ -35,8 +35,6 @@ import ( var log = clog.NewWithPlugin("kubernetes") func init() { - klog.SetOutput(os.Stdout) - caddy.RegisterPlugin("kubernetes", caddy.Plugin{ ServerType: "dns", Action: setup, @@ -44,6 +42,8 @@ func init() { } func setup(c *caddy.Controller) error { + klog.SetOutput(os.Stdout) + k, err := kubernetesParse(c) if err != nil { return plugin.Error("kubernetes", err) diff --git a/test/secondary_test.go b/test/secondary_test.go index bfa1da05b2c..c0df1632b1b 100644 --- a/test/secondary_test.go +++ b/test/secondary_test.go @@ -73,13 +73,13 @@ func TestSecondaryZoneTransfer(t *testing.T) { var r *dns.Msg // This is now async; we we need to wait for it to be transferred. 
for i := 0; i < 10; i++ { - r, _ = dns.Exchange(m, udp) - if len(r.Answer) == 0 { + r, err = dns.Exchange(m, udp) + if len(r.Answer) != 0 { break } time.Sleep(100 * time.Microsecond) } - if len(r.Answer) != 0 { + if len(r.Answer) == 0 { t.Fatalf("Expected answer section") } } From 1fda5703fc83f21b21bbe3b6796e476397394e18 Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Thu, 1 Aug 2019 13:50:28 +0000 Subject: [PATCH 059/324] Release 1.6.1 (#3081) ... and another one Signed-off-by: Miek Gieben --- coremain/version.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/coremain/version.go b/coremain/version.go index 182af29afa4..11b7673451d 100644 --- a/coremain/version.go +++ b/coremain/version.go @@ -2,7 +2,7 @@ package coremain // Various CoreDNS constants. const ( - CoreVersion = "1.6.0" + CoreVersion = "1.6.1" coreName = "CoreDNS" serverType = "dns" ) From 6faff83d7f8eb892f9a7d2388bcdacc4c1baf20b Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Thu, 1 Aug 2019 14:00:37 +0000 Subject: [PATCH 060/324] Make -f Makefile.doc: mechanical change (#3082) Wanted this *before* 1.6.1 but I forgot; ah well... 
Signed-off-by: Miek Gieben --- man/coredns-file.7 | 2 +- man/coredns-hosts.7 | 11 +++++++++-- man/coredns-reload.7 | 7 ++++--- 3 files changed, 14 insertions(+), 6 deletions(-) diff --git a/man/coredns-file.7 b/man/coredns-file.7 index 5cc685b10e5..90327a38ead 100644 --- a/man/coredns-file.7 +++ b/man/coredns-file.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-FILE" 7 "July 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-FILE" 7 "August 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-hosts.7 b/man/coredns-hosts.7 index e5c43dd4331..f3ba8d1b80c 100644 --- a/man/coredns-hosts.7 +++ b/man/coredns-hosts.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-HOSTS" 7 "July 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-HOSTS" 7 "August 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP @@ -17,6 +17,10 @@ The plugin reloads the content of the hosts file every 5 seconds. Upon reload, C new definitions. Should the file be deleted, any inlined content will continue to be served. When the file is restored, it will then again be used. +.PP +If you want to pass the request to the rest of the plugin chain if there is no match in the \fIhosts\fP +plugin, you must specify the \fB\fCfallthrough\fR option. + .PP This plugin can only be used once per Server Block. @@ -76,7 +80,10 @@ file path will still be read but entries will be overridden. .IP \(bu 4 \fB\fCttl\fR change the DNS TTL of the records generated (forward and reverse). The default is 3600 seconds (1 hour). .IP \(bu 4 -\fB\fCreload\fR change the period between each hostsfile reload. A time of zero seconds disable the feature. Examples of valid durations: "300ms", "1.5h" or "2h45m" are valid duration with units "ns" (nanosecond), "us" (or "µs" for microsecond), "ms" (millisecond), "s" (second), "m" (minute), "h" (hour). +\fB\fCreload\fR change the period between each hostsfile reload. A time of zero seconds disables the +feature. 
Examples of valid durations: "300ms", "1.5h" or "2h45m". See Go's +time +\[la]https://godoc.org/time\[ra]. package. .IP \(bu 4 \fB\fCno_reverse\fR disable the automatic generation of the \fB\fCin-addr.arpa\fR or \fB\fCip6.arpa\fR entries for the hosts .IP \(bu 4 diff --git a/man/coredns-reload.7 b/man/coredns-reload.7 index 1f2efbab02f..4a52f6fbfc6 100644 --- a/man/coredns-reload.7 +++ b/man/coredns-reload.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-RELOAD" 7 "July 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-RELOAD" 7 "August 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP @@ -131,8 +131,9 @@ closed in step 1; so the health endpoint is broken. The same can hopen in the pr In general be careful with assigning new port and expecting reload to work fully. .PP -Also any \fB\fCimport\fR statement is not discovered by this plugin. This means if any of these imported files -changes the \fIreload\fP plugin is ignorant of that fact. +In CoreDNS v1.6.0 and earlier any \fB\fCimport\fR statements are not discovered by this plugin. +This means if any of these imported files changes the \fIreload\fP plugin is ignorant of that fact. +CoreDNS v1.7.0 and later does parse the Corefile and supports detecting changes in imported files. .SH "METRICS" .PP From fc1e313ca7e0dad73de0db28e5cb9ef2828e6441 Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Sat, 3 Aug 2019 09:31:01 +0000 Subject: [PATCH 061/324] plugin/file: unify a serial logging (#3088) Use %d SOA serial when logging about the SOA serial. 
Signed-off-by: Miek Gieben --- plugin/file/file.go | 4 ++-- plugin/file/reload.go | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/plugin/file/file.go b/plugin/file/file.go index 1aebf6cda0d..d8de85cd3c4 100644 --- a/plugin/file/file.go +++ b/plugin/file/file.go @@ -58,7 +58,7 @@ func (f File) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (i if ok { z.TransferIn() } else { - log.Infof("Notify from %s for %s: no serial increase seen", state.IP(), zone) + log.Infof("Notify from %s for %s: no SOA serial increase seen", state.IP(), zone) } if err != nil { log.Warningf("Notify from %s for %s: failed primary check: %s", state.IP(), zone, err) @@ -115,7 +115,7 @@ type serialErr struct { } func (s *serialErr) Error() string { - return fmt.Sprintf("%s for origin %s in file %s, with serial %d", s.err, s.origin, s.zone, s.serial) + return fmt.Sprintf("%s for origin %s in file %s, with %d SOA serial", s.err, s.origin, s.zone, s.serial) } // Parse parses the zone in filename and returns a new Zone or an error. diff --git a/plugin/file/reload.go b/plugin/file/reload.go index eb762ac5279..c56d8f37f05 100644 --- a/plugin/file/reload.go +++ b/plugin/file/reload.go @@ -38,7 +38,7 @@ func (z *Zone) Reload() error { z.Tree = zone.Tree z.Unlock() - log.Infof("Successfully reloaded zone %q in %q with serial %d", z.origin, zFile, z.Apex.SOA.Serial) + log.Infof("Successfully reloaded zone %q in %q with %d SOA serial", z.origin, zFile, z.Apex.SOA.Serial) z.Notify() case <-z.reloadShutdown: From 94468c41b088a3153a1c77997c18a952d73f0b30 Mon Sep 17 00:00:00 2001 From: Matt Kulka Date: Sat, 3 Aug 2019 18:07:28 -0700 Subject: [PATCH 062/324] plugin/route53: make refresh frequency adjustable (#3083) the current update frequency for the refresh loop in the route 53 plugin is hard-coded to 1 minute. aws rate-limits the number of api requests so less frequent record refreshes can help when reaching those limits depending upon your individual scenarios. 
this pull adds a configuration option to the route53 plugin to adjust the refresh frequency. thanks for getting my last pull released so quickly. this is the last local change that i have been running and would love to get it contributed back to the project. Signed-off-by: Matt Kulka --- plugin/route53/README.md | 18 ++++++++++++++++++ plugin/route53/route53.go | 8 +++++--- plugin/route53/route53_test.go | 5 +++-- plugin/route53/setup.go | 24 +++++++++++++++++++++++- plugin/route53/setup_test.go | 16 ++++++++++++++++ 5 files changed, 65 insertions(+), 6 deletions(-) diff --git a/plugin/route53/README.md b/plugin/route53/README.md index d492ae913c3..e704ced602d 100644 --- a/plugin/route53/README.md +++ b/plugin/route53/README.md @@ -18,6 +18,7 @@ route53 [ZONE:HOSTED_ZONE_ID...] { aws_access_key [AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY] credentials PROFILE [FILENAME] fallthrough [ZONES...] + refresh DURATION } ~~~ @@ -48,6 +49,14 @@ route53 [ZONE:HOSTED_ZONE_ID...] { * **ZONES** zones it should be authoritative for. If empty, the zones from the configuration block. +* `refresh` can be used to control how long between record retrievals from Route 53. It requires + a duration string as a parameter to specify the duration between update cycles. Each update + cycle may result in many AWS API calls depending on how many domains use this plugin and how + many records are in each. Adjusting the update frequency may help reduce the potential of API + rate-limiting imposed by AWS. + +* **DURATION** A duration string. Defaults to `1m`. If units are unspecified, seconds are assumed. + ## Examples Enable route53 with implicit AWS credentials and resolve CNAMEs via 10.0.0.1: @@ -86,3 +95,12 @@ Enable route53 with multiple hosted zones with the same domain: route53 example.org.:Z1Z2Z3Z4DZ5Z6Z7 example.org.:Z93A52145678156 } ~~~ + +Enable route53 and refresh records every 3 minutes +~~~ txt +. 
{ + route53 example.org.:Z1Z2Z3Z4DZ5Z6Z7 { + refresh 3m + } +} +~~~ diff --git a/plugin/route53/route53.go b/plugin/route53/route53.go index 0f7c2b1d266..346defa66d6 100644 --- a/plugin/route53/route53.go +++ b/plugin/route53/route53.go @@ -31,6 +31,7 @@ type Route53 struct { zoneNames []string client route53iface.Route53API upstream *upstream.Upstream + refresh time.Duration zMu sync.RWMutex zones zones @@ -49,7 +50,7 @@ type zones map[string][]*zone // exist, and returns a new *Route53. In addition to this, upstream is passed // for doing recursive queries against CNAMEs. // Returns error if it cannot verify any given domain name/zone id pair. -func New(ctx context.Context, c route53iface.Route53API, keys map[string][]string, up *upstream.Upstream) (*Route53, error) { +func New(ctx context.Context, c route53iface.Route53API, keys map[string][]string, up *upstream.Upstream, refresh time.Duration) (*Route53, error) { zones := make(map[string][]*zone, len(keys)) zoneNames := make([]string, 0, len(keys)) for dns, hostedZoneIDs := range keys { @@ -72,6 +73,7 @@ func New(ctx context.Context, c route53iface.Route53API, keys map[string][]strin zoneNames: zoneNames, zones: zones, upstream: up, + refresh: refresh, }, nil } @@ -87,7 +89,7 @@ func (h *Route53) Run(ctx context.Context) error { case <-ctx.Done(): log.Infof("Breaking out of Route53 update loop: %v", ctx.Err()) return - case <-time.After(1 * time.Minute): + case <-time.After(h.refresh): if err := h.updateZones(ctx); err != nil && ctx.Err() == nil /* Don't log error if ctx expired. 
*/ { log.Errorf("Failed to update zones: %v", err) } @@ -248,7 +250,7 @@ func (h *Route53) updateZones(ctx context.Context) error { newZ.Upstream = h.upstream in := &route53.ListResourceRecordSetsInput{ HostedZoneId: aws.String(hostedZone.id), - MaxItems: aws.String("1000"), + MaxItems: aws.String("1000"), } err = h.client.ListResourceRecordSetsPagesWithContext(ctx, in, func(out *route53.ListResourceRecordSetsOutput, last bool) bool { diff --git a/plugin/route53/route53_test.go b/plugin/route53/route53_test.go index 5657e7c7ad9..64ea90d6a0f 100644 --- a/plugin/route53/route53_test.go +++ b/plugin/route53/route53_test.go @@ -5,6 +5,7 @@ import ( "errors" "reflect" "testing" + "time" "github.com/coredns/coredns/plugin/pkg/dnstest" "github.com/coredns/coredns/plugin/pkg/fall" @@ -79,7 +80,7 @@ func (fakeRoute53) ListResourceRecordSetsPagesWithContext(_ aws.Context, in *rou func TestRoute53(t *testing.T) { ctx := context.Background() - r, err := New(ctx, fakeRoute53{}, map[string][]string{"bad.": {"0987654321"}}, &upstream.Upstream{}) + r, err := New(ctx, fakeRoute53{}, map[string][]string{"bad.": {"0987654321"}}, &upstream.Upstream{}, time.Duration(1) * time.Minute) if err != nil { t.Fatalf("Failed to create Route53: %v", err) } @@ -87,7 +88,7 @@ func TestRoute53(t *testing.T) { t.Fatalf("Expected errors for zone bad.") } - r, err = New(ctx, fakeRoute53{}, map[string][]string{"org.": {"1357986420", "1234567890"}, "gov.": {"Z098765432", "1234567890"}}, &upstream.Upstream{}) + r, err = New(ctx, fakeRoute53{}, map[string][]string{"org.": {"1357986420", "1234567890"}, "gov.": {"Z098765432", "1234567890"}}, &upstream.Upstream{}, time.Duration(90) * time.Second) if err != nil { t.Fatalf("Failed to create Route53: %v", err) } diff --git a/plugin/route53/setup.go b/plugin/route53/setup.go index 1872dce4e35..918b0e56a29 100644 --- a/plugin/route53/setup.go +++ b/plugin/route53/setup.go @@ -2,7 +2,10 @@ package route53 import ( "context" + "fmt" + "strconv" 
"strings" + "time" "github.com/coredns/coredns/core/dnsserver" "github.com/coredns/coredns/plugin" @@ -53,6 +56,8 @@ func setup(c *caddy.Controller, f func(*credentials.Credentials) route53iface.Ro up := upstream.New() + refresh := time.Duration(1) * time.Minute // default update frequency to 1 minute + args := c.RemainingArgs() for i := 0; i < len(args); i++ { @@ -98,6 +103,23 @@ func setup(c *caddy.Controller, f func(*credentials.Credentials) route53iface.Ro } case "fallthrough": fall.SetZonesFromArgs(c.RemainingArgs()) + case "refresh": + if c.NextArg() { + refreshStr := c.Val() + _, err := strconv.Atoi(refreshStr) + if err == nil { + refreshStr = fmt.Sprintf("%ss", c.Val()) + } + refresh, err = time.ParseDuration(refreshStr) + if err != nil { + return c.Errf("Unable to parse duration: '%v'", err) + } + if refresh <= 0 { + return c.Errf("refresh interval must be greater than 0: %s", refreshStr) + } + } else { + return c.ArgErr() + } default: return c.Errf("unknown property '%s'", c.Val()) } @@ -107,7 +129,7 @@ func setup(c *caddy.Controller, f func(*credentials.Credentials) route53iface.Ro }) client := f(credentials.NewChainCredentials(providers)) ctx := context.Background() - h, err := New(ctx, client, keys, up) + h, err := New(ctx, client, keys, up, refresh) if err != nil { return c.Errf("failed to create Route53 plugin: %v", err) } diff --git a/plugin/route53/setup_test.go b/plugin/route53/setup_test.go index 3e827eb58a9..998285e46f0 100644 --- a/plugin/route53/setup_test.go +++ b/plugin/route53/setup_test.go @@ -51,6 +51,22 @@ func TestSetupRoute53(t *testing.T) { {`route53 example.org:12345678 example.org:12345678 { }`, true}, + {`route53 example.org:12345678 { + refresh 90 +}`, false}, + {`route53 example.org:12345678 { + refresh 5m +}`, false}, + {`route53 example.org:12345678 { + refresh +}`, true}, + {`route53 example.org:12345678 { + refresh foo +}`, true}, + {`route53 example.org:12345678 { + refresh -1m +}`, true}, + {`route53 
example.org { }`, true}, } From cdb2c8e7651bc3647a4bc2a37f1e36bb9118b80d Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 5 Aug 2019 07:50:21 -0700 Subject: [PATCH 063/324] build(deps): bump github.com/aws/aws-sdk-go from 1.21.6 to 1.21.9 (#3089) Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.21.6 to 1.21.9. - [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Changelog](https://github.com/aws/aws-sdk-go/blob/master/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.21.6...v1.21.9) Signed-off-by: dependabot-preview[bot] --- go.mod | 2 +- go.sum | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index e77aead100e..41608e5c6a6 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,7 @@ require ( cloud.google.com/go v0.41.0 // indirect github.com/Shopify/sarama v1.21.0 // indirect github.com/apache/thrift v0.12.0 // indirect - github.com/aws/aws-sdk-go v1.21.6 + github.com/aws/aws-sdk-go v1.21.9 github.com/caddyserver/caddy v1.0.1 github.com/coreos/bbolt v1.3.2 // indirect github.com/coreos/etcd v3.3.13+incompatible diff --git a/go.sum b/go.sum index 4d62d6179c6..41b1ae2bc64 100644 --- a/go.sum +++ b/go.sum @@ -18,6 +18,8 @@ github.com/apache/thrift v0.12.0 h1:pODnxUFNcjP9UTLZGTdeh+j16A8lJbRvD3rOtrk/7bs= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/aws/aws-sdk-go v1.21.6 h1:3GuIm55Uls52aQIDGBnSEZbk073jpasfQyeM5eZU61Q= github.com/aws/aws-sdk-go v1.21.6/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.21.9 h1:+HXP97l4IbJvccwwNoweEknroEcX8QLwExcnc+Kxobg= +github.com/aws/aws-sdk-go v1.21.9/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/beorn7/perks 
v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= From 883d47685f7c2803c01a6b9822a96629b35d69a0 Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 5 Aug 2019 07:50:38 -0700 Subject: [PATCH 064/324] build(deps): bump github.com/prometheus/client_golang (#3090) Bumps [github.com/prometheus/client_golang](https://github.com/prometheus/client_golang) from 1.0.0 to 1.1.0. - [Release notes](https://github.com/prometheus/client_golang/releases) - [Changelog](https://github.com/prometheus/client_golang/blob/master/CHANGELOG.md) - [Commits](https://github.com/prometheus/client_golang/compare/v1.0.0...v1.1.0) Signed-off-by: dependabot-preview[bot] --- go.mod | 2 +- go.sum | 9 +++++++++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index 41608e5c6a6..49fafc84c64 100644 --- a/go.mod +++ b/go.mod @@ -36,7 +36,7 @@ require ( github.com/openzipkin-contrib/zipkin-go-opentracing v0.3.5 github.com/philhofer/fwd v1.0.0 // indirect github.com/pkg/errors v0.8.1 // indirect - github.com/prometheus/client_golang v1.0.0 + github.com/prometheus/client_golang v1.1.0 github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 github.com/prometheus/common v0.6.0 github.com/sirupsen/logrus v1.4.2 // indirect diff --git a/go.sum b/go.sum index 41b1ae2bc64..ff4461070ce 100644 --- a/go.sum +++ b/go.sum @@ -23,6 +23,7 @@ github.com/aws/aws-sdk-go v1.21.9/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= 
github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bifurcation/mint v0.0.0-20180715133206-93c51c6ce115/go.mod h1:zVt7zX3K/aDCk9Tj+VM7YymsX66ERvzCJzw8rFCX2JU= github.com/caddyserver/caddy v1.0.1 h1:oor6ep+8NoJOabpFXhvjqjfeldtw1XSzfISVrbfqTKo= github.com/caddyserver/caddy v1.0.1/go.mod h1:G+ouvOY32gENkJC+jhgl62TyhvqEsFaDiZ4uw0RzP1E= @@ -103,6 +104,7 @@ github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf h1:+RRA9JqSOZFfKrOeqr2z77+8R2RKyh8PG66dcu1V0ck= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= @@ -146,6 +148,7 @@ github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22 github.com/json-iterator/go v0.0.0-20180701071628-ab8a2e0c74be/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7/go.mod 
h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= @@ -174,8 +177,10 @@ github.com/mholt/certmagic v0.6.2-0.20190624175158-6a42ef9fe8c2 h1:xKE9kZ5C8gelJ github.com/mholt/certmagic v0.6.2-0.20190624175158-6a42ef9fe8c2/go.mod h1:g4cOPxcjV0oFq3qwpjSA30LReKD8AoIfwAY9VvG35NY= github.com/miekg/dns v1.1.15 h1:CSSIDtllwGLMoA6zjdKnaE6Tx6eVUxQ29LUgGetiDCI= github.com/miekg/dns v1.1.15/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= @@ -207,6 +212,8 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0 h1:vrDKnkGzuGvhNAL56c7DBz29ZL+KxnoR0x7enabFceM= github.com/prometheus/client_golang 
v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.1.0 h1:BQ53HtBmfOitExawJ6LokA4x8ov/z0SYYb0+HxJfRI8= +github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -217,6 +224,7 @@ github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+ github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhDYGoxY8uLVpewe1GDZ2vu2Tr/vTdVAkFQ= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= @@ -318,6 +326,7 @@ golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb h1:fgwFCsaw9buMuxNd6+DQfAuSFqbNiQZpcgJQAgJsK6k= golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= From 935354deb3480763b5f5084c94320df66c22c681 Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Thu, 8 Aug 2019 15:17:53 +0100 Subject: [PATCH 065/324] README: add DoH example (#3096) Show example for DoH. And cleanup the README (format to 80 column linewidth). Fixes: #3094 Signed-off-by: Miek Gieben --- README.md | 44 +++++++++++++++++++++++++++++--------------- 1 file changed, 29 insertions(+), 15 deletions(-) diff --git a/README.md b/README.md index 78c02ed5558..22208502f4d 100644 --- a/README.md +++ b/README.md @@ -29,15 +29,17 @@ Currently CoreDNS is able to: * Allow for zone transfers, i.e., act as a primary server (*file*). * Automatically load zone files from disk (*auto*). * Caching of DNS responses (*cache*). -* Use etcd as a backend (replace [SkyDNS](https://github.com/skynetservices/skydns)) (*etcd*). +* Use etcd as a backend (replacing [SkyDNS](https://github.com/skynetservices/skydns)) (*etcd*). * Use k8s (kubernetes) as a backend (*kubernetes*). * Serve as a proxy to forward queries to some other (recursive) nameserver (*forward*). * Provide metrics (by using Prometheus) (*metrics*). * Provide query (*log*) and error (*errors*) logging. +* Integrate with cloud providers (*route53*). * Support the CH class: `version.bind` and friends (*chaos*). * Support the RFC 5001 DNS name server identifier (NSID) option (*nsid*). * Profiling support (*pprof*). * Rewrite queries (qtype, qclass and qname) (*rewrite* and *template*). +* Block ANY queries (*any*). And more. Each of the plugins is documented. 
See [coredns.io/plugins](https://coredns.io/plugins) for all in-tree plugins, and [coredns.io/explugins](https://coredns.io/explugins) for all @@ -45,11 +47,13 @@ out-of-tree plugins. ## Compilation from Source -To compile CoreDNS, we assume you have a working Go setup. See various tutorials if you don’t have that already configured. +To compile CoreDNS, we assume you have a working Go setup. See various tutorials if you don’t have +that already configured. First, make sure your golang version is 1.12 or higher as `go mod` support is needed. See [here](https://github.com/golang/go/wiki/Modules) for `go mod` details. Then, check out the project and run `make` to compile the binary: + ~~~ $ git clone https://github.com/coredns/coredns $ cd coredns @@ -60,8 +64,8 @@ This should yield a `coredns` binary. ## Compilation with Docker -CoreDNS requires Go to compile. However, if you already have docker installed and prefer not to setup -a Go environment, you could build CoreDNS easily: +CoreDNS requires Go to compile. However, if you already have docker installed and prefer not to +setup a Go environment, you could build CoreDNS easily: ``` $ docker run --rm -i -t -v $PWD:/go/src/github.com/coredns/coredns \ @@ -85,8 +89,8 @@ CoreDNS-001 Any query sent to port 53 should return some information; your sending address, port and protocol used. -If you have a Corefile without a port number specified it will, by default, use port 53, but you -can override the port with the `-dns.port` flag: +If you have a Corefile without a port number specified it will, by default, use port 53, but you can +override the port with the `-dns.port` flag: `./coredns -dns.port 1053`, runs the server on port 1053. @@ -101,8 +105,8 @@ Start a simple proxy. You'll need to be root to start listening on port 53. } ~~~ -Just start CoreDNS: `./coredns`. Then just query on that port (53). The query should be forwarded to -8.8.8.8 and the response will be returned. 
Each query should also show up in the log which is +Just start CoreDNS: `./coredns`. Then just query on that port (53). The query should be forwarded +to 8.8.8.8 and the response will be returned. Each query should also show up in the log which is printed on standard output. Serve the (NSEC) DNSSEC-signed `example.org` on port 1053, with errors and logging sent to standard @@ -120,8 +124,8 @@ example.org:1053 { } ~~~ -Serve `example.org` on port 1053, but forward everything that does *not* match `example.org` to a recursive -nameserver *and* rewrite ANY queries to HINFO. +Serve `example.org` on port 1053, but forward everything that does *not* match `example.org` to a +recursive nameserver *and* rewrite ANY queries to HINFO. ~~~ txt .:1053 { @@ -152,7 +156,7 @@ add the closing dot: `10.0.0.0/24.` as this also stops the conversion. This even works for CIDR (See RFC 1518 and 1519) addressing, i.e. `10.0.0.0/25`, CoreDNS will then check if the `in-addr` request falls in the correct range. -Listening on TLS and for gRPC? Use: +Listening on TLS (DoT) and for gRPC? Use: ~~~ corefile tls://example.org grpc://example.org { @@ -160,6 +164,14 @@ tls://example.org grpc://example.org { } ~~~ +And for DNS over HTTP/2 (DoH) use: + +~~~ corefile +https://example.org { + whoami +} +~~~ + Specifying ports works in the same way: ~~~ txt @@ -186,7 +198,8 @@ More resources can be found: ## Contribution guidelines -If you want to contribute to CoreDNS, be sure to review the [contribution guidelines](CONTRIBUTING.md). +If you want to contribute to CoreDNS, be sure to review the [contribution +guidelines](CONTRIBUTING.md). ## Deployment @@ -210,8 +223,8 @@ And finally 1.4.1 that removes the config workarounds. ## Security ### Security Audit - -A third party security audit was performed by Cure53, you can see the full report [here](https://coredns.io/assets/DNS-01-report.pdf). 
+A third party security audit was performed by Cure53, you can see the full report +[here](https://coredns.io/assets/DNS-01-report.pdf). ### Reporting security vulnerabilities @@ -219,4 +232,5 @@ If you find a security vulnerability or any security related issues, please DO N issue, instead send your report privately to `security@coredns.io`. Security reports are greatly appreciated and we will publicly thank you for it. -Please consult [security vulnerability disclosures and security fix and release process document](https://github.com/coredns/coredns/blob/master/SECURITY-RELEASE-PROCESS.md) +Please consult [security vulnerability disclosures and security fix and release process +document](https://github.com/coredns/coredns/blob/master/SECURITY-RELEASE-PROCESS.md) From 5b74d0f957a2565c8ee168ad55283b0914cac9d7 Mon Sep 17 00:00:00 2001 From: ethan Date: Fri, 9 Aug 2019 00:27:53 +0800 Subject: [PATCH 066/324] metrics.go: hemp message correction (#3100) Signed-off-by: ethan --- plugin/forward/metrics.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugin/forward/metrics.go b/plugin/forward/metrics.go index 0fe470926f4..e120f55fc17 100644 --- a/plugin/forward/metrics.go +++ b/plugin/forward/metrics.go @@ -31,13 +31,13 @@ var ( Namespace: plugin.Namespace, Subsystem: "forward", Name: "healthcheck_failure_count_total", - Help: "Counter of the number of failed healtchecks.", + Help: "Counter of the number of failed healthchecks.", }, []string{"to"}) HealthcheckBrokenCount = prometheus.NewCounter(prometheus.CounterOpts{ Namespace: plugin.Namespace, Subsystem: "forward", Name: "healthcheck_broken_count_total", - Help: "Counter of the number of complete failures of the healtchecks.", + Help: "Counter of the number of complete failures of the healthchecks.", }) SocketGauge = prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: plugin.Namespace, From 879466b0288a2d11e278950375c7593b60ea0677 Mon Sep 17 00:00:00 2001 From: Darshan 
Chaudhary Date: Fri, 9 Aug 2019 12:40:28 +0530 Subject: [PATCH 067/324] Add plugin for Azure DNS (#2945) * Add plugin for Azure DNS Signed-off-by: darshanime * Rename AzureDNS plugin to Azure Signed-off-by: darshanime * remove upstream from azure syntax Signed-off-by: darshanime * Rename azure plugin block keynames Signed-off-by: darshanime * Normalize zone name before lookup in zones Signed-off-by: darshanime * Update import path for caddy Signed-off-by: darshanime * normalize azure zone name only if required Signed-off-by: darshanime * Add support for MX, SRV, TXT, records Signed-off-by: darshanime * Add specs for new record types Signed-off-by: darshanime * Use sequential updates for zones Signed-off-by: darshanime * Add OWNERS file for azure plugin Signed-off-by: darshanime * Rename imports for third party packages Signed-off-by: darshanime * Capitalize values in README Signed-off-by: darshanime * Shorten keys for azure plugin config Signed-off-by: darshanime * Fixup readme for azure plugin Signed-off-by: darshanime --- core/dnsserver/zdirectives.go | 1 + core/plugin/zplugin.go | 1 + go.mod | 24 +-- go.sum | 131 +++++++++------ plugin.cfg | 1 + plugin/azure/OWNERS | 8 + plugin/azure/README.md | 52 ++++++ plugin/azure/azure.go | 293 ++++++++++++++++++++++++++++++++++ plugin/azure/azure_test.go | 180 +++++++++++++++++++++ plugin/azure/setup.go | 123 ++++++++++++++ plugin/azure/setup_test.go | 72 +++++++++ 11 files changed, 825 insertions(+), 61 deletions(-) create mode 100644 plugin/azure/OWNERS create mode 100644 plugin/azure/README.md create mode 100644 plugin/azure/azure.go create mode 100644 plugin/azure/azure_test.go create mode 100644 plugin/azure/setup.go create mode 100644 plugin/azure/setup_test.go diff --git a/core/dnsserver/zdirectives.go b/core/dnsserver/zdirectives.go index 75c79f3e8fe..b897cc04fa7 100644 --- a/core/dnsserver/zdirectives.go +++ b/core/dnsserver/zdirectives.go @@ -36,6 +36,7 @@ var Directives = []string{ "template", "hosts", 
"route53", + "azure", "federation", "k8s_external", "kubernetes", diff --git a/core/plugin/zplugin.go b/core/plugin/zplugin.go index 94d7f5f4705..0c3b447ce1f 100644 --- a/core/plugin/zplugin.go +++ b/core/plugin/zplugin.go @@ -8,6 +8,7 @@ import ( _ "github.com/coredns/coredns/plugin/any" _ "github.com/coredns/coredns/plugin/auto" _ "github.com/coredns/coredns/plugin/autopath" + _ "github.com/coredns/coredns/plugin/azure" _ "github.com/coredns/coredns/plugin/bind" _ "github.com/coredns/coredns/plugin/cache" _ "github.com/coredns/coredns/plugin/cancel" diff --git a/go.mod b/go.mod index 49fafc84c64..35ba659df58 100644 --- a/go.mod +++ b/go.mod @@ -4,27 +4,29 @@ go 1.12 require ( cloud.google.com/go v0.41.0 // indirect + github.com/Azure/azure-sdk-for-go v31.1.0+incompatible + github.com/Azure/go-autorest/autorest v0.5.0 + github.com/Azure/go-autorest/autorest/azure/auth v0.1.0 + github.com/Azure/go-autorest/autorest/to v0.2.0 // indirect github.com/Shopify/sarama v1.21.0 // indirect - github.com/apache/thrift v0.12.0 // indirect - github.com/aws/aws-sdk-go v1.21.9 + github.com/aws/aws-sdk-go v1.21.2 github.com/caddyserver/caddy v1.0.1 github.com/coreos/bbolt v1.3.2 // indirect github.com/coreos/etcd v3.3.13+incompatible github.com/coreos/go-semver v0.2.0 // indirect github.com/coreos/go-systemd v0.0.0-20190212144455-93d5ec2c7f76 // indirect github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f // indirect - github.com/dgrijalva/jwt-go v3.2.0+incompatible // indirect github.com/dnstap/golang-dnstap v0.0.0-20170829151710-2cf77a2b5e11 github.com/evanphx/json-patch v4.1.0+incompatible // indirect github.com/farsightsec/golang-framestream v0.0.0-20181102145529-8a0cb8ba8710 github.com/gogo/protobuf v1.2.1 // indirect github.com/golang/groupcache 
v0.0.0-20190129154638-5b532d6fd5ef // indirect github.com/golang/protobuf v1.3.2 + github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf // indirect github.com/googleapis/gnostic v0.2.0 // indirect github.com/gophercloud/gophercloud v0.0.0-20190307220656-fe1ba5ce12dd // indirect github.com/grpc-ecosystem/go-grpc-middleware v1.0.0 // indirect github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect - github.com/grpc-ecosystem/grpc-gateway v1.8.3 // indirect github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 github.com/imdario/mergo v0.3.7 // indirect github.com/jonboulle/clockwork v0.1.0 // indirect @@ -34,9 +36,10 @@ require ( github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492 // indirect github.com/opentracing/opentracing-go v1.1.0 github.com/openzipkin-contrib/zipkin-go-opentracing v0.3.5 + github.com/openzipkin/zipkin-go-opentracing v0.3.5 // indirect github.com/philhofer/fwd v1.0.0 // indirect github.com/pkg/errors v0.8.1 // indirect - github.com/prometheus/client_golang v1.1.0 + github.com/prometheus/client_golang v1.0.0 github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 github.com/prometheus/common v0.6.0 github.com/sirupsen/logrus v1.4.2 // indirect @@ -54,15 +57,16 @@ require ( golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 // indirect google.golang.org/genproto v0.0.0-20190701230453-710ae3a149df // indirect - google.golang.org/grpc v1.22.1 - gopkg.in/DataDog/dd-trace-go.v1 v1.16.1 + google.golang.org/grpc v1.22.0 + gopkg.in/DataDog/dd-trace-go.v1 v1.16.0 gopkg.in/inf.v0 v0.9.1 // indirect - k8s.io/api v0.0.0-20190620084959-7cf5895f2711 - k8s.io/apimachinery v0.0.0-20190612205821-1799e75a0719 - k8s.io/client-go v0.0.0-20190620085101-78d2af792bab + 
k8s.io/api v0.0.0-20190313235455-40a48860b5ab + k8s.io/apimachinery v0.0.0-20190313205120-d7deff9243b1 + k8s.io/client-go v11.0.0+incompatible k8s.io/klog v0.3.3 k8s.io/kube-openapi v0.0.0-20190306001800-15615b16d372 // indirect k8s.io/utils v0.0.0-20190529001817-6999998975a7 // indirect + sigs.k8s.io/yaml v1.1.0 // indirect ) replace github.com/miekg/dns v1.1.3 => github.com/miekg/dns v1.1.15 diff --git a/go.sum b/go.sum index ff4461070ce..4ae5cff1689 100644 --- a/go.sum +++ b/go.sum @@ -3,11 +3,34 @@ cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.41.0 h1:NFvqUTDnSNYPX5oReekmB+D+90jrJIcVImxQ3qrBVgM= cloud.google.com/go v0.41.0/go.mod h1:OauMR7DV8fzvZIl2qg6rkaIhD/vmgk4iwEw/h6ercmg= -github.com/Azure/go-autorest v11.1.2+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +contrib.go.opencensus.io/exporter/ocagent v0.4.12 h1:jGFvw3l57ViIVEPKKEUXPcLYIXJmQxLUh6ey1eJhwyc= +contrib.go.opencensus.io/exporter/ocagent v0.4.12/go.mod h1:450APlNTSR6FrvC3CTRqYosuDstRB9un7SOx2k/9ckA= +github.com/Azure/azure-sdk-for-go v31.1.0+incompatible h1:5SzgnfAvUBdBwNTN23WLfZoCt/rGhLvd7QdCAaFXgY4= +github.com/Azure/azure-sdk-for-go v31.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/go-autorest/autorest v0.1.0/go.mod h1:AKyIcETwSUFxIcs/Wnq/C+kwCtlEYGUVd7FPNb2slmg= +github.com/Azure/go-autorest/autorest v0.5.0 h1:Mlm9qy2fpQ9MvfyI41G2Zf5B4CsgjjNbLOWszfK6KrY= +github.com/Azure/go-autorest/autorest v0.5.0/go.mod h1:9HLKlQjVBH6U3oDfsXOeVc56THsLPw1L03yban4xThw= +github.com/Azure/go-autorest/autorest/adal v0.1.0/go.mod h1:MeS4XhScH55IST095THyTxElntu7WqB7pNbZo8Q5G3E= +github.com/Azure/go-autorest/autorest/adal v0.2.0 h1:7IBDu1jgh+ADHXnEYExkV9RE/ztOOlxdACkkPRthGKw= +github.com/Azure/go-autorest/autorest/adal 
v0.2.0/go.mod h1:MeS4XhScH55IST095THyTxElntu7WqB7pNbZo8Q5G3E= +github.com/Azure/go-autorest/autorest/azure/auth v0.1.0 h1:YgO/vSnJEc76NLw2ecIXvXa8bDWiqf1pOJzARAoZsYU= +github.com/Azure/go-autorest/autorest/azure/auth v0.1.0/go.mod h1:Gf7/i2FUpyb/sGBLIFxTBzrNzBo7aPXXE3ZVeDRwdpM= +github.com/Azure/go-autorest/autorest/azure/cli v0.1.0 h1:YTtBrcb6mhA+PoSW8WxFDoIIyjp13XqJeX80ssQtri4= +github.com/Azure/go-autorest/autorest/azure/cli v0.1.0/go.mod h1:Dk8CUAt/b/PzkfeRsWzVG9Yj3ps8mS8ECztu43rdU8U= +github.com/Azure/go-autorest/autorest/date v0.1.0 h1:YGrhWfrgtFs84+h0o46rJrlmsZtyZRg470CqAXTZaGM= +github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/to v0.2.0 h1:nQOZzFCudTh+TvquAtCRjM01VEYx85e9qbwt5ncW4L8= +github.com/Azure/go-autorest/autorest/to v0.2.0/go.mod h1:GunWKJp1AEqgMaGLV+iocmRAJWqST1wQYhyyjXJ3SJc= +github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY= +github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= +github.com/Azure/go-autorest/tracing v0.1.0 h1:TRBxC5Pj/fIuh4Qob0ZpkggbfT8RC0SubHbpV3p4/Vc= +github.com/Azure/go-autorest/tracing v0.1.0/go.mod h1:ROEEAFwXycQw7Sn3DXNtEedEvdeRAgDr0izn4z5Ij88= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DataDog/zstd v1.3.5 h1:DtpNbljikUepEPD16hD4LvIcmhnhdLTiW/5pHgbmp14= github.com/DataDog/zstd v1.3.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= +github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= 
github.com/Shopify/sarama v1.21.0 h1:0GKs+e8mn1RRUzfg9oUXv3v7ZieQLmOZF/bfnmmGhM8= github.com/Shopify/sarama v1.21.0/go.mod h1:yuqtN/pe8cXRWG5zPaO7hCfNJp5MwmkoJEoLjkm5tCQ= github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc= @@ -16,19 +39,28 @@ github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuy github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/apache/thrift v0.12.0 h1:pODnxUFNcjP9UTLZGTdeh+j16A8lJbRvD3rOtrk/7bs= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= -github.com/aws/aws-sdk-go v1.21.6 h1:3GuIm55Uls52aQIDGBnSEZbk073jpasfQyeM5eZU61Q= -github.com/aws/aws-sdk-go v1.21.6/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.21.9 h1:+HXP97l4IbJvccwwNoweEknroEcX8QLwExcnc+Kxobg= -github.com/aws/aws-sdk-go v1.21.9/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.20.15 h1:y9ts8MJhB7ReUidS6Rq+0KxdFeL01J+pmOlGq6YqpiQ= +github.com/aws/aws-sdk-go v1.20.15/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.20.17 h1:ZQ0cM9FpuufVDHlNViD8vD68IkEQL/VUOMwJPBqkaaM= +github.com/aws/aws-sdk-go v1.20.17/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.20.20 h1:OAR/GtjMOhenkp1NNKr1N1FgIP3mQXHeGbRhvVIAQp0= +github.com/aws/aws-sdk-go v1.20.20/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.21.0 h1:dRGzi4XZe5GFSJssHkRNUf/hRD/HL8bdqTYa9hpAO8c= +github.com/aws/aws-sdk-go v1.21.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.21.1 h1:IOFDnCEDybcw4V8nbKqyyjBu+vpu7hFYSfZqNuogi7I= +github.com/aws/aws-sdk-go v1.21.1/go.mod 
h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.21.2 h1:CqbWrQzi7s8J2F0TRRdLvTr0+bt5Zxo2IDoFNGsAiUg= +github.com/aws/aws-sdk-go v1.21.2/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= -github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bifurcation/mint v0.0.0-20180715133206-93c51c6ce115/go.mod h1:zVt7zX3K/aDCk9Tj+VM7YymsX66ERvzCJzw8rFCX2JU= github.com/caddyserver/caddy v1.0.1 h1:oor6ep+8NoJOabpFXhvjqjfeldtw1XSzfISVrbfqTKo= github.com/caddyserver/caddy v1.0.1/go.mod h1:G+ouvOY32gENkJC+jhgl62TyhvqEsFaDiZ4uw0RzP1E= github.com/cenkalti/backoff v2.1.1+incompatible h1:tKJnvO2kl0zmb/jA5UKAt4VoEVw1qxKWjE/Bpp46npY= github.com/cenkalti/backoff v2.1.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/census-instrumentation/opencensus-proto v0.2.0 h1:LzQXZOgg4CQfE6bFvXGM30YZL1WW/M337pXml+GrcZ4= +github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cheekybits/genny v0.0.0-20170328200008-9127e812e1e9/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/coreos/bbolt v1.3.2 h1:wZwiHHUieZCquLkDL0B8UhzreNWsPHooDAG3q34zk0s= @@ -44,12 +76,12 @@ github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfc github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 
h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dgrijalva/jwt-go v0.0.0-20160705203006-01aeca54ebda/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dimchansky/utfbom v1.1.0 h1:FcM3g+nofKgUteL8dm/UpdRXNC9KmADgTpLKsu0TRo4= +github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= github.com/dnstap/golang-dnstap v0.0.0-20170829151710-2cf77a2b5e11 h1:m8nX8hsUghn853BJ5qB0lX+VvS6LTJPksWyILFZRYN4= github.com/dnstap/golang-dnstap v0.0.0-20170829151710-2cf77a2b5e11/go.mod h1:s1PfVYYVmTMgCSPtho4LKBDecEHJWtiVDPNv78Z985U= -github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/eapache/go-resiliency v1.1.0 h1:1NtRmCAqadE2FN4ZcN6g90TP3uk8cg9rn9eNK2197aU= github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= @@ -57,8 +89,6 @@ github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8 github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= -github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= -github.com/evanphx/json-patch v0.0.0-20190203023257-5858425f7550/go.mod 
h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.1.0+incompatible h1:K1MDoo4AZ4wU0GIU/fPmtZg7VpzLjCxu+UwBD1FvwOc= github.com/evanphx/json-patch v4.1.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/farsightsec/golang-framestream v0.0.0-20181102145529-8a0cb8ba8710 h1:QdyRyGZWLEvJG5Kw3VcVJvhXJ5tZ1MkRgqpJOEZSySM= @@ -76,13 +106,12 @@ github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9 github.com/go-logfmt/logfmt v0.4.0 h1:MP4Eh7ZCb31lleYCFuwm0oe4/YGak+5l1vA2NOE80nA= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/gogo/protobuf v0.0.0-20171007142547-342cbe0a0415/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef h1:veQD95Isof8w9/WXiA+pa3tz3fJXkt5B7QaRBrM62gk= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= @@ 
-95,39 +124,36 @@ github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db h1:woRePGFeVFfLKN/pOkfl+p/TAqKOfFu+7KPlMVpok/w= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/btree v0.0.0-20160524151835-7d79101e329e/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf h1:+RRA9JqSOZFfKrOeqr2z77+8R2RKyh8PG66dcu1V0ck= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= -github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1 
h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.2.0 h1:l6N3VoaVzTncYYW+9yOz2LJJammFZGBO13sqgEhpy9g= github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= -github.com/gophercloud/gophercloud v0.0.0-20190126172459-c818fa66e4c8/go.mod h1:3WdhXV3rUYy9p6AUW8d94kr+HS62Y4VL9mBnFxsD8q4= github.com/gophercloud/gophercloud v0.0.0-20190307220656-fe1ba5ce12dd h1:tkA3C/XTk8iACLOlTez37pL+0iGSYkkRGKdXgJ6ZylM= github.com/gophercloud/gophercloud v0.0.0-20190307220656-fe1ba5ce12dd/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= +github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= +github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gregjones/httpcache v0.0.0-20170728041850-787624de3eb7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0 h1:Iju5GlWwrvL6UBg4zJJt3btmonfrMlCDdsejg4CZE7c= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod 
h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.8.3 h1:wZ6biDjnBgIOf5t+r8dYsoGWH1Zl2Ps32OxD80Odbk8= github.com/grpc-ecosystem/grpc-gateway v1.8.3/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.8.5 h1:2+KSC78XiO6Qy0hIjfc1OD9H+hsaJdJlb8Kqsd41CTE= +github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 h1:MJG/KsmcqMwFAkh8mTnAwhyKoB+sTAnY4CACC110tbU= github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= @@ -137,7 +163,6 @@ github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+ github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.7 h1:Y+UAYTZ7gDEuOfhxKWy+dvb5dRQ6rJjFSdX2HZY1/gI= github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/jimstudt/http-authentication v0.0.0-20140401203705-3eca13d6893a/go.mod h1:wK6yTYYcgjHE1Z1QtXACPDjcFJyBskHEdagmnq3vsP8= @@ -145,10 +170,8 @@ github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5i github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo= 
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/json-iterator/go v0.0.0-20180701071628-ab8a2e0c74be/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= @@ -177,20 +200,20 @@ github.com/mholt/certmagic v0.6.2-0.20190624175158-6a42ef9fe8c2 h1:xKE9kZ5C8gelJ github.com/mholt/certmagic v0.6.2-0.20190624175158-6a42ef9fe8c2/go.mod h1:g4cOPxcjV0oFq3qwpjSA30LReKD8AoIfwAY9VvG35NY= github.com/miekg/dns v1.1.15 h1:CSSIDtllwGLMoA6zjdKnaE6Tx6eVUxQ29LUgGetiDCI= github.com/miekg/dns v1.1.15/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= 
github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0= github.com/naoina/toml v0.1.1/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.8.0 h1:VkHVNpR4iVnU8XQR6DBm8BqYjN7CRzw+xKUbVVbbW9w= github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/gomega v0.0.0-20190113212917-5533ce8a0da3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492 h1:lM6RxxfUMrYL/f8bWEUqdXrANWtrL7Nndbm9iFN0DlU= @@ -199,7 +222,9 @@ github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsq github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/openzipkin-contrib/zipkin-go-opentracing v0.3.5 h1:82Tnq9OJpn+h5xgGpss5/mOv3KXdjtkdorFSOUusjM8= github.com/openzipkin-contrib/zipkin-go-opentracing v0.3.5/go.mod h1:uVHyebswE1cCXr2A73cRM2frx5ld1RJUCJkFNZ90ZiI= 
-github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= +github.com/openzipkin/zipkin-go-opentracing v0.3.5 h1:nZPvd2EmRKP+NzFdSuxZF/FG4Y4W2gn6ugXliTAu9o0= +github.com/openzipkin/zipkin-go-opentracing v0.3.5/go.mod h1:js2AbwmHW0YD9DwIw2JhQWmbfFi/UnWyYwdVhqbCDOE= github.com/philhofer/fwd v1.0.0 h1:UbZqGr5Y38ApvM/V/jEljVxwocdweyH+vmYvRPBnbqQ= github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= github.com/pierrec/lz4 v2.0.5+incompatible h1:2xWsjqPFWcplujydGg4WmhC/6fZqK42wMM8aXeqhl0I= @@ -210,21 +235,22 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= github.com/prometheus/client_golang v1.0.0 h1:vrDKnkGzuGvhNAL56c7DBz29ZL+KxnoR0x7enabFceM= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.1.0 h1:BQ53HtBmfOitExawJ6LokA4x8ov/z0SYYb0+HxJfRI8= -github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model 
v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1 h1:K0MGApIoQvMw27RTdJkPbr3JZ7DNbtxQNyi5STVM6Kw= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.6.0 h1:kRhiuYSXR3+uv2IbVbZhUxK5zVD/2pp3Gd2PpvPkpEo= github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhDYGoxY8uLVpewe1GDZ2vu2Tr/vTdVAkFQ= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= @@ -235,7 +261,6 @@ github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6Mwd github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/spf13/afero v1.2.2/go.mod 
h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -251,7 +276,10 @@ github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5 github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= go.etcd.io/bbolt v1.3.2 h1:Z/90sZLPOeCy2PwprqkFa25PdkusRzaj9P8zm/KNyvk= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.uber.org/atomic v1.3.2 h1:2Oa65PReHzfn29GpvgsYwloV9AVFHPDk8tYxt2c2tr4= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= @@ -260,11 +288,11 @@ go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/ go.uber.org/zap v1.9.1 h1:XCJQEf3W6eZaVwhRBof6ImoYGJSITeKWsyeh3HFu/5o= go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181025213731-e84da0312774/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190123085648-057139ce5d2b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod 
h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190228161510-8dd112bcdc25/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190418165655-df01cb2cc480/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 h1:HuIa8hRrWRSrqYzx1qI49NNxhdi2PrY7gxVSq1JjLDc= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -284,7 +312,6 @@ golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190206173232-65e2d4e15006/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190328230028-74de082e2cca/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -299,7 +326,6 @@ golang.org/x/net v0.0.0-20190628185345-da137c7871d7 h1:rTIdg5QFRR7XCaK4LCjBiPbx8 golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= 
-golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -313,11 +339,13 @@ golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190124100055-b90733256f2e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -326,17 +354,15 @@ golang.org/x/sys 
v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb h1:fgwFCsaw9buMuxNd6+DQfAuSFqbNiQZpcgJQAgJsK6k= golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/time v0.0.0-20161028155119-f51c12702a4d/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -350,7 +376,9 @@ golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod 
h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190624190245-7f2218787638/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0 h1:9sdfJOzWlkqPltHAuzT2Cp+yrBeY1KRVYgms8soxMwM= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -365,21 +393,24 @@ google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRn google.golang.org/genproto v0.0.0-20190626174449-989357319d63/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= google.golang.org/genproto v0.0.0-20190701230453-710ae3a149df h1:k3DT34vxk64+4bD5x+fRy6U0SXxZehzUHRSYUJcKfII= google.golang.org/genproto v0.0.0-20190701230453-710ae3a149df/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.19.1/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1 h1:j6XxA85m/6txkUCHvzlV5f+HBNl/1r5cZ2A/3IEFOO8= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.22.1 h1:/7cs52RnTJmD43s3uxzlq2U7nqVTd/37viQwMrMNlOM= -google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= 
-gopkg.in/DataDog/dd-trace-go.v1 v1.16.1 h1:Dngw1zun6yTYFHNdzEWBlrJzFA2QJMjSA2sZ4nH2UWo= -gopkg.in/DataDog/dd-trace-go.v1 v1.16.1/go.mod h1:DVp8HmDh8PuTu2Z0fVVlBsyWaC++fzwVCaGWylTe3tg= +google.golang.org/grpc v1.22.0 h1:J0UbZOIrCAl+fpTOf8YLs4dJo8L/owV4LYVtAXQoPkw= +google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +gopkg.in/DataDog/dd-trace-go.v1 v1.15.0 h1:2LhklnAJsRSelbnBrrE5QuRleRDkmOh2JWxOtIX6yec= +gopkg.in/DataDog/dd-trace-go.v1 v1.15.0/go.mod h1:DVp8HmDh8PuTu2Z0fVVlBsyWaC++fzwVCaGWylTe3tg= +gopkg.in/DataDog/dd-trace-go.v1 v1.16.0 h1:M+pIoj6uFByW3E8KaAaBmsLl3Sma3JzXZ9eo7ffyX5A= +gopkg.in/DataDog/dd-trace-go.v1 v1.16.0/go.mod h1:DVp8HmDh8PuTu2Z0fVVlBsyWaC++fzwVCaGWylTe3tg= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/mcuadros/go-syslog.v2 v2.2.1/go.mod h1:l5LPIyOOyIdQquNg+oU6Z3524YwrcqEm0aKH+5zpt2U= @@ -393,24 +424,22 @@ gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bl gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +honnef.co/go/tools 
v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -k8s.io/api v0.0.0-20190620084959-7cf5895f2711 h1:BblVYz/wE5WtBsD/Gvu54KyBUTJMflolzc5I2DTvh50= -k8s.io/api v0.0.0-20190620084959-7cf5895f2711/go.mod h1:TBhBqb1AWbBQbW3XRusr7n7E4v2+5ZY8r8sAMnyFC5A= -k8s.io/apimachinery v0.0.0-20190612205821-1799e75a0719 h1:uV4S5IB5g4Nvi+TBVNf3e9L4wrirlwYJ6w88jUQxTUw= -k8s.io/apimachinery v0.0.0-20190612205821-1799e75a0719/go.mod h1:I4A+glKBHiTgiEjQiCCQfCAIcIMFGt291SmsvcrFzJA= -k8s.io/client-go v0.0.0-20190620085101-78d2af792bab h1:E8Fecph0qbNsAbijJJQryKu4Oi9QTp5cVpjTE+nqg6g= -k8s.io/client-go v0.0.0-20190620085101-78d2af792bab/go.mod h1:E95RaSlHr79aHaX0aGSwcPNfygDiPKOVXdmivCIZT0k= +k8s.io/api v0.0.0-20190313235455-40a48860b5ab h1:DG9A67baNpoeweOy2spF1OWHhnVY5KR7/Ek/+U1lVZc= +k8s.io/api v0.0.0-20190313235455-40a48860b5ab/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA= +k8s.io/apimachinery v0.0.0-20190313205120-d7deff9243b1 h1:IS7K02iBkQXpCeieSiyJjGoLSdVOv2DbPaWHJ+ZtgKg= +k8s.io/apimachinery v0.0.0-20190313205120-d7deff9243b1/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0= +k8s.io/client-go v11.0.0+incompatible h1:LBbX2+lOwY9flffWlJM7f1Ct8V2SRNiMRDFeiwnJo9o= +k8s.io/client-go v11.0.0+incompatible/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s= k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v0.3.1/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.3.3 h1:niceAagH1tzskmaie/icWd7ci1wbG7Bf2c6YGcQv+3c= k8s.io/klog v0.3.3/go.mod 
h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= k8s.io/kube-openapi v0.0.0-20190306001800-15615b16d372 h1:zia7dTzfEtdiSUxi9cXUDsSQH2xE6igmGKyFn2on/9A= k8s.io/kube-openapi v0.0.0-20190306001800-15615b16d372/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= -k8s.io/utils v0.0.0-20190221042446-c2654d5206da/go.mod h1:8k8uAuAQ0rXslZKaEWd0c3oVhZz7sSzSiPnVZayjIX0= k8s.io/utils v0.0.0-20190529001817-6999998975a7 h1:5UOdmwfY+7XsXvo26XeCDu9GhHJPkO1z8Mcz5AHMnOE= k8s.io/utils v0.0.0-20190529001817-6999998975a7/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= diff --git a/plugin.cfg b/plugin.cfg index 4c7dd8385b3..4d18c7d4e5f 100644 --- a/plugin.cfg +++ b/plugin.cfg @@ -45,6 +45,7 @@ autopath:autopath template:template hosts:hosts route53:route53 +azure:azure federation:federation k8s_external:k8s_external kubernetes:kubernetes diff --git a/plugin/azure/OWNERS b/plugin/azure/OWNERS new file mode 100644 index 00000000000..c30e67ae3fd --- /dev/null +++ b/plugin/azure/OWNERS @@ -0,0 +1,8 @@ +reviewers: + - miekg + - yongtang + - darshanime +approvers: + - miekg + - yongtang + - darshanime diff --git a/plugin/azure/README.md b/plugin/azure/README.md new file mode 100644 index 00000000000..5c7be25a8c2 --- /dev/null +++ b/plugin/azure/README.md @@ -0,0 +1,52 @@ +# azure + +## Name + +*azure* - enables serving zone data from Microsoft Azure DNS service. + +## Description + +The azure plugin is useful for serving zones from Microsoft Azure DNS. +The *azure* plugin supports all the DNS records supported by Azure, viz. A, AAAA, CAA, CNAME, MX, NS, PTR, SOA, SRV, and TXT record types. For a non-existing resource record, the zone's SOA response will be returned. + + +## Syntax + +~~~ txt +azure RESOURCE_GROUP:ZONE... 
{ + tenant TENANT_ID + client CLIENT_ID + secret CLIENT_SECRET + subscription SUBSCRIPTION_ID +} +~~~ + +* **`RESOURCE_GROUP`** The resource group to which the dns hosted zones belong on Azure + +* **`ZONE`** the zone that contains the resource record sets to be + accessed. + +* `fallthrough` If zone matches and no record can be generated, pass request to the next plugin. + If **ZONES** is omitted, then fallthrough happens for all zones for which the plugin is + authoritative. If specific zones are listed (for example `in-addr.arpa` and `ip6.arpa`), then + only queries for those zones will be subject to fallthrough. + +* `environment` the azure environment to use. Defaults to `AzurePublicCloud`. Possible values: `AzureChinaCloud`, `AzureGermanCloud`, `AzurePublicCloud`, `AzureUSGovernmentCloud`. + +## Examples + +Enable the *azure* plugin with Azure credentials: + +~~~ txt +. { + azure resource_group_foo:foo.com { + tenant 123abc-123abc-123abc-123abc + client 123abc-123abc-123abc-123abc + secret 123abc-123abc-123abc-123abc + subscription 123abc-123abc-123abc-123abc + } +} +~~~ + +## Also See +- [Azure DNS Overview](https://docs.microsoft.com/en-us/azure/dns/dns-overview) diff --git a/plugin/azure/azure.go b/plugin/azure/azure.go new file mode 100644 index 00000000000..f3b2fad404e --- /dev/null +++ b/plugin/azure/azure.go @@ -0,0 +1,293 @@ +package azure + +import ( + "context" + "fmt" + "net" + "sync" + "time" + + "github.com/coredns/coredns/plugin" + "github.com/coredns/coredns/plugin/file" + "github.com/coredns/coredns/plugin/pkg/fall" + "github.com/coredns/coredns/plugin/pkg/upstream" + "github.com/coredns/coredns/request" + + azuredns "github.com/Azure/azure-sdk-for-go/profiles/latest/dns/mgmt/dns" + "github.com/miekg/dns" +) + +type zone struct { + id string + z *file.Zone + zone string +} + +type zones map[string][]*zone + +// Azure is the core struct of the azure plugin. 
+type Azure struct { + Next plugin.Handler + Fall fall.F + zoneNames []string + client azuredns.RecordSetsClient + upstream *upstream.Upstream + zMu sync.RWMutex + zones zones +} + +// New validates the input DNS zones and initializes the Azure struct. +func New(ctx context.Context, dnsClient azuredns.RecordSetsClient, keys map[string][]string, up *upstream.Upstream) (*Azure, error) { + zones := make(map[string][]*zone, len(keys)) + zoneNames := make([]string, 0, len(keys)) + for resourceGroup, inputZoneNames := range keys { + for _, zoneName := range inputZoneNames { + _, err := dnsClient.ListAllByDNSZone(context.Background(), resourceGroup, zoneName, nil, "") + if err != nil { + return nil, err + } + // Normalizing zoneName to make it fqdn if required. + zoneNameFQDN := dns.Fqdn(zoneName) + if _, ok := zones[zoneNameFQDN]; !ok { + zoneNames = append(zoneNames, zoneNameFQDN) + } + zones[zoneNameFQDN] = append(zones[zoneNameFQDN], &zone{id: resourceGroup, zone: zoneName, z: file.NewZone(zoneName, "")}) + } + } + return &Azure{ + client: dnsClient, + zones: zones, + zoneNames: zoneNames, + upstream: up, + }, nil +} + +// Run updates the zone from azure. 
+func (h *Azure) Run(ctx context.Context) error { + if err := h.updateZones(ctx); err != nil { + return err + } + go func() { + for { + select { + case <-ctx.Done(): + log.Infof("Breaking out of Azure update loop: %v", ctx.Err()) + return + case <-time.After(1 * time.Minute): + if err := h.updateZones(ctx); err != nil && ctx.Err() == nil { + log.Errorf("Failed to update zones: %v", err) + } + } + } + }() + return nil +} + +func (h *Azure) updateZones(ctx context.Context) error { + errs := make([]string, 0) + for zName, z := range h.zones { + for i, hostedZone := range z { + recordSet, err := h.client.ListByDNSZone(ctx, hostedZone.id, hostedZone.zone, nil, "") + if err != nil { + errs = append(errs, fmt.Sprintf("failed to list resource records for %v from azure: %v", hostedZone.zone, err)) + } + newZ := updateZoneFromResourceSet(recordSet, zName) + newZ.Upstream = h.upstream + h.zMu.Lock() + (*z[i]).z = newZ + h.zMu.Unlock() + } + } + + if len(errs) != 0 { + return fmt.Errorf("errors updating zones: %v", errs) + } + return nil + +} + +func updateZoneFromResourceSet(recordSet azuredns.RecordSetListResultPage, zName string) *file.Zone { + newZ := file.NewZone(zName, "") + + for _, result := range *(recordSet.Response().Value) { + resultFqdn := *(result.RecordSetProperties.Fqdn) + resultTTL := uint32(*(result.RecordSetProperties.TTL)) + if result.RecordSetProperties.ARecords != nil { + for _, A := range *(result.RecordSetProperties.ARecords) { + a := dns.A{ + Hdr: dns.RR_Header{ + Name: resultFqdn, + Rrtype: dns.TypeA, + Class: dns.ClassINET, + Ttl: resultTTL}, + A: net.ParseIP(*(A.Ipv4Address))} + newZ.Insert(&a) + } + } + + if result.RecordSetProperties.AaaaRecords != nil { + for _, AAAA := range *(result.RecordSetProperties.AaaaRecords) { + aaaa := dns.AAAA{ + Hdr: dns.RR_Header{ + Name: resultFqdn, + Rrtype: dns.TypeAAAA, + Class: dns.ClassINET, + Ttl: resultTTL}, + AAAA: net.ParseIP(*(AAAA.Ipv6Address))} + newZ.Insert(&aaaa) + } + } + + if 
result.RecordSetProperties.MxRecords != nil { + for _, MX := range *(result.RecordSetProperties.MxRecords) { + mx := dns.MX{ + Hdr: dns.RR_Header{ + Name: resultFqdn, + Rrtype: dns.TypeMX, + Class: dns.ClassINET, + Ttl: resultTTL}, + Preference: uint16(*(MX.Preference)), + Mx: dns.Fqdn(*(MX.Exchange))} + newZ.Insert(&mx) + } + } + + if result.RecordSetProperties.PtrRecords != nil { + for _, PTR := range *(result.RecordSetProperties.PtrRecords) { + ptr := dns.PTR{ + Hdr: dns.RR_Header{ + Name: resultFqdn, + Rrtype: dns.TypePTR, + Class: dns.ClassINET, + Ttl: resultTTL}, + Ptr: dns.Fqdn(*(PTR.Ptrdname))} + newZ.Insert(&ptr) + } + } + + if result.RecordSetProperties.SrvRecords != nil { + for _, SRV := range *(result.RecordSetProperties.SrvRecords) { + srv := dns.SRV{ + Hdr: dns.RR_Header{ + Name: resultFqdn, + Rrtype: dns.TypeSRV, + Class: dns.ClassINET, + Ttl: resultTTL}, + Priority: uint16(*(SRV.Priority)), + Weight: uint16(*(SRV.Weight)), + Port: uint16(*(SRV.Port)), + Target: dns.Fqdn(*(SRV.Target))} + newZ.Insert(&srv) + } + } + + if result.RecordSetProperties.TxtRecords != nil { + for _, TXT := range *(result.RecordSetProperties.TxtRecords) { + txt := dns.TXT{ + Hdr: dns.RR_Header{ + Name: resultFqdn, + Rrtype: dns.TypeTXT, + Class: dns.ClassINET, + Ttl: resultTTL}, + Txt: *(TXT.Value)} + newZ.Insert(&txt) + } + } + + if result.RecordSetProperties.NsRecords != nil { + for _, NS := range *(result.RecordSetProperties.NsRecords) { + ns := dns.NS{ + Hdr: dns.RR_Header{ + Name: resultFqdn, + Rrtype: dns.TypeNS, + Class: dns.ClassINET, + Ttl: resultTTL}, + Ns: *(NS.Nsdname)} + newZ.Insert(&ns) + } + } + + if result.RecordSetProperties.SoaRecord != nil { + SOA := result.RecordSetProperties.SoaRecord + soa := dns.SOA{ + Hdr: dns.RR_Header{ + Name: resultFqdn, + Rrtype: dns.TypeSOA, + Class: dns.ClassINET, + Ttl: resultTTL}, + Minttl: uint32(*(SOA.MinimumTTL)), + Expire: uint32(*(SOA.ExpireTime)), + Retry: uint32(*(SOA.RetryTime)), + Refresh: uint32(*(SOA.RefreshTime)), 
+ Serial: uint32(*(SOA.SerialNumber)), + Mbox: dns.Fqdn(*(SOA.Email)), + Ns: *(SOA.Host)} + newZ.Insert(&soa) + } + + if result.RecordSetProperties.CnameRecord != nil { + CNAME := result.RecordSetProperties.CnameRecord.Cname + cname := dns.CNAME{ + Hdr: dns.RR_Header{ + Name: resultFqdn, + Rrtype: dns.TypeCNAME, + Class: dns.ClassINET, + Ttl: resultTTL}, + Target: dns.Fqdn(*CNAME)} + newZ.Insert(&cname) + } + } + return newZ +} + +// ServeDNS implements the plugin.Handler interface. +func (h *Azure) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (int, error) { + state := request.Request{W: w, Req: r} + qname := state.Name() + + zName := plugin.Zones(h.zoneNames).Matches(qname) + if zName == "" { + return plugin.NextOrFailure(h.Name(), h.Next, ctx, w, r) + } + + z, ok := h.zones[zName] // ok true if we are authoritive for the zone. + if !ok || z == nil { + return dns.RcodeServerFailure, nil + } + + m := new(dns.Msg) + m.SetReply(r) + m.Authoritative = true + var result file.Result + for _, hostedZone := range z { + h.zMu.RLock() + m.Answer, m.Ns, m.Extra, result = hostedZone.z.Lookup(ctx, state, qname) + h.zMu.RUnlock() + + // record type exists for this name (NODATA). + if len(m.Answer) != 0 || result == file.NoData { + break + } + } + + if len(m.Answer) == 0 && result != file.NoData && h.Fall.Through(qname) { + return plugin.NextOrFailure(h.Name(), h.Next, ctx, w, r) + } + + switch result { + case file.Success: + case file.NoData: + case file.NameError: + m.Rcode = dns.RcodeNameError + case file.Delegation: + m.Authoritative = false + case file.ServerFailure: + return dns.RcodeServerFailure, nil + } + + w.WriteMsg(m) + return dns.RcodeSuccess, nil +} + +// Name implements plugin.Handler.Name. 
+func (h *Azure) Name() string { return "azure" } diff --git a/plugin/azure/azure_test.go b/plugin/azure/azure_test.go new file mode 100644 index 00000000000..d006f196bce --- /dev/null +++ b/plugin/azure/azure_test.go @@ -0,0 +1,180 @@ +package azure + +import ( + "context" + "reflect" + "testing" + + "github.com/coredns/coredns/plugin/file" + "github.com/coredns/coredns/plugin/pkg/dnstest" + "github.com/coredns/coredns/plugin/pkg/fall" + "github.com/coredns/coredns/plugin/test" + "github.com/coredns/coredns/request" + + "github.com/miekg/dns" +) + +var demoAzure = Azure{ + Next: testHandler(), + Fall: fall.Zero, + zoneNames: []string{"example.org.", "www.example.org.", "example.org.", "sample.example.org."}, + zones: testZones(), +} + +func testZones() zones { + zones := make(map[string][]*zone) + zones["example.org."] = append(zones["example.org."], &zone{zone: "example.org."}) + newZ := file.NewZone("example.org.", "") + + for _, rr := range []string{ + "example.org. 300 IN A 1.2.3.4", + "example.org. 300 IN AAAA 2001:db8:85a3::8a2e:370:7334", + "www.example.org. 300 IN A 1.2.3.4", + "www.example.org. 300 IN A 1.2.3.4", + "org. 172800 IN NS ns3-06.azure-dns.org.", + "org. 300 IN SOA ns1-06.azure-dns.com. azuredns-hostmaster.microsoft.com. 1 3600 300 2419200 300", + "cname.example.org. 300 IN CNAME example.org", + "mail.example.org. 300 IN MX 10 mailserver.example.com", + "ptr.example.org. 300 IN PTR www.ptr-example.com", + "example.org. 300 IN SRV 1 10 5269 srv-1.example.com.", + "example.org. 300 IN SRV 1 10 5269 srv-2.example.com.", + "txt.example.org. 
300 IN TXT \"TXT for example.org\"", + } { + r, _ := dns.NewRR(rr) + newZ.Insert(r) + } + zones["example.org."][0].z = newZ + return zones +} + +func testHandler() test.HandlerFunc { + return func(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (int, error) { + state := request.Request{W: w, Req: r} + qname := state.Name() + m := new(dns.Msg) + rcode := dns.RcodeServerFailure + if qname == "example.gov." { // No records match, test fallthrough. + m.SetReply(r) + rr := test.A("example.gov. 300 IN A 2.4.6.8") + m.Answer = []dns.RR{rr} + m.Authoritative = true + rcode = dns.RcodeSuccess + } + m.SetRcode(r, rcode) + w.WriteMsg(m) + return rcode, nil + } +} + +func TestAzure(t *testing.T) { + tests := []struct { + qname string + qtype uint16 + wantRetCode int + wantAnswer []string + wantMsgRCode int + wantNS []string + expectedErr error + }{ + { + qname: "example.org.", + qtype: dns.TypeA, + wantAnswer: []string{"example.org. 300 IN A 1.2.3.4"}, + }, + { + qname: "example.org", + qtype: dns.TypeAAAA, + wantAnswer: []string{"example.org. 300 IN AAAA 2001:db8:85a3::8a2e:370:7334"}, + }, + { + qname: "example.org", + qtype: dns.TypeSOA, + wantAnswer: []string{"org. 300 IN SOA ns1-06.azure-dns.com. azuredns-hostmaster.microsoft.com. 1 3600 300 2419200 300"}, + }, + { + qname: "badexample.com", + qtype: dns.TypeA, + wantRetCode: dns.RcodeServerFailure, + wantMsgRCode: dns.RcodeServerFailure, + }, + { + qname: "example.gov", + qtype: dns.TypeA, + wantAnswer: []string{"example.gov. 300 IN A 2.4.6.8"}, + }, + { + qname: "example.org", + qtype: dns.TypeSRV, + wantAnswer: []string{"example.org. 300 IN SRV 1 10 5269 srv-1.example.com.", "example.org. 300 IN SRV 1 10 5269 srv-2.example.com."}, + }, + { + qname: "cname.example.org.", + qtype: dns.TypeCNAME, + wantAnswer: []string{"cname.example.org. 300 IN CNAME example.org."}, + }, + { + qname: "cname.example.org.", + qtype: dns.TypeA, + wantAnswer: []string{"cname.example.org. 300 IN CNAME example.org.", "example.org. 
300 IN A 1.2.3.4"}, + }, + { + qname: "mail.example.org.", + qtype: dns.TypeMX, + wantAnswer: []string{"mail.example.org. 300 IN MX 10 mailserver.example.com."}, + }, + { + qname: "ptr.example.org.", + qtype: dns.TypePTR, + wantAnswer: []string{"ptr.example.org. 300 IN PTR www.ptr-example.com."}, + }, + { + qname: "txt.example.org.", + qtype: dns.TypeTXT, + wantAnswer: []string{"txt.example.org. 300 IN TXT \"TXT for example.org\""}, + }, + } + + for ti, tc := range tests { + req := new(dns.Msg) + req.SetQuestion(dns.Fqdn(tc.qname), tc.qtype) + + rec := dnstest.NewRecorder(&test.ResponseWriter{}) + code, err := demoAzure.ServeDNS(context.Background(), rec, req) + + if err != tc.expectedErr { + t.Fatalf("Test %d: Expected error %v, but got %v", ti, tc.expectedErr, err) + } + + if code != int(tc.wantRetCode) { + t.Fatalf("Test %d: Expected returned status code %s, but got %s", ti, dns.RcodeToString[tc.wantRetCode], dns.RcodeToString[code]) + } + + if tc.wantMsgRCode != rec.Msg.Rcode { + t.Errorf("Test %d: Unexpected msg status code. Want: %s, got: %s", ti, dns.RcodeToString[tc.wantMsgRCode], dns.RcodeToString[rec.Msg.Rcode]) + } + + if len(tc.wantAnswer) != len(rec.Msg.Answer) { + t.Errorf("Test %d: Unexpected number of Answers. Want: %d, got: %d", ti, len(tc.wantAnswer), len(rec.Msg.Answer)) + } else { + for i, gotAnswer := range rec.Msg.Answer { + if gotAnswer.String() != tc.wantAnswer[i] { + t.Errorf("Test %d: Unexpected answer.\nWant:\n\t%s\nGot:\n\t%s", ti, tc.wantAnswer[i], gotAnswer) + } + } + } + + if len(tc.wantNS) != len(rec.Msg.Ns) { + t.Errorf("Test %d: Unexpected NS number. Want: %d, got: %d", ti, len(tc.wantNS), len(rec.Msg.Ns)) + } else { + for i, ns := range rec.Msg.Ns { + got, ok := ns.(*dns.SOA) + if !ok { + t.Errorf("Test %d: Unexpected NS type. 
Want: SOA, got: %v", ti, reflect.TypeOf(got)) + } + if got.String() != tc.wantNS[i] { + t.Errorf("Test %d: Unexpected NS.\nWant: %v\nGot: %v", ti, tc.wantNS[i], got) + } + } + } + } +} diff --git a/plugin/azure/setup.go b/plugin/azure/setup.go new file mode 100644 index 00000000000..1ac0cc72349 --- /dev/null +++ b/plugin/azure/setup.go @@ -0,0 +1,123 @@ +package azure + +import ( + "context" + "strings" + + "github.com/coredns/coredns/core/dnsserver" + "github.com/coredns/coredns/plugin" + "github.com/coredns/coredns/plugin/pkg/fall" + clog "github.com/coredns/coredns/plugin/pkg/log" + "github.com/coredns/coredns/plugin/pkg/upstream" + + azuredns "github.com/Azure/azure-sdk-for-go/profiles/latest/dns/mgmt/dns" + azurerest "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/azure/auth" + "github.com/caddyserver/caddy" +) + +var log = clog.NewWithPlugin("azure") + +func init() { + caddy.RegisterPlugin("azure", caddy.Plugin{ + ServerType: "dns", + Action: setup, + }) +} + +func setup(c *caddy.Controller) error { + env, keys, fall, err := parse(c) + if err != nil { + return plugin.Error("azure", err) + } + ctx := context.Background() + dnsClient := azuredns.NewRecordSetsClient(env.Values[auth.SubscriptionID]) + dnsClient.Authorizer, err = env.GetAuthorizer() + if err != nil { + return c.Errf("failed to create azure plugin: %v", err) + } + h, err := New(ctx, dnsClient, keys, upstream.New()) + if err != nil { + return c.Errf("failed to initialize azure plugin: %v", err) + } + h.Fall = fall + if err := h.Run(ctx); err != nil { + return c.Errf("failed to run azure plugin: %v", err) + } + dnsserver.GetConfig(c).AddPlugin(func(next plugin.Handler) plugin.Handler { + h.Next = next + return h + }) + return nil +} + +func parse(c *caddy.Controller) (auth.EnvironmentSettings, map[string][]string, fall.F, error) { + resourceGroupMapping := map[string][]string{} + 
resourceGroupSet := map[string]struct{}{} + var err error + var fall fall.F + + azureEnv := azurerest.PublicCloud + env := auth.EnvironmentSettings{Values: map[string]string{}} + + for c.Next() { + args := c.RemainingArgs() + + for i := 0; i < len(args); i++ { + parts := strings.SplitN(args[i], ":", 2) + if len(parts) != 2 { + return env, resourceGroupMapping, fall, c.Errf("invalid resource group / zone '%s'", args[i]) + } + resourceGroup, zoneName := parts[0], parts[1] + if resourceGroup == "" || zoneName == "" { + return env, resourceGroupMapping, fall, c.Errf("invalid resource group / zone '%s'", args[i]) + } + if _, ok := resourceGroupSet[args[i]]; ok { + return env, resourceGroupMapping, fall, c.Errf("conflict zone '%s'", args[i]) + } + + resourceGroupSet[args[i]] = struct{}{} + resourceGroupMapping[resourceGroup] = append(resourceGroupMapping[resourceGroup], zoneName) + } + for c.NextBlock() { + switch c.Val() { + case "subscription": + if !c.NextArg() { + return env, resourceGroupMapping, fall, c.ArgErr() + } + env.Values[auth.SubscriptionID] = c.Val() + case "tenant": + if !c.NextArg() { + return env, resourceGroupMapping, fall, c.ArgErr() + } + env.Values[auth.TenantID] = c.Val() + case "client": + if !c.NextArg() { + return env, resourceGroupMapping, fall, c.ArgErr() + } + env.Values[auth.ClientID] = c.Val() + case "secret": + if !c.NextArg() { + return env, resourceGroupMapping, fall, c.ArgErr() + } + env.Values[auth.ClientSecret] = c.Val() + case "environment": + if !c.NextArg() { + return env, resourceGroupMapping, fall, c.ArgErr() + } + env.Values[auth.ClientSecret] = c.Val() + azureEnv, err = azurerest.EnvironmentFromName(c.Val()) + if err != nil { + return env, resourceGroupMapping, fall, c.Errf("cannot set azure environment: %s", err.Error()) + } + case "fallthrough": + fall.SetZonesFromArgs(c.RemainingArgs()) + default: + return env, resourceGroupMapping, fall, c.Errf("unknown property '%s'", c.Val()) + } + } + } + env.Values[auth.Resource] = 
azureEnv.ResourceManagerEndpoint + env.Environment = azureEnv + return env, resourceGroupMapping, fall, nil +} diff --git a/plugin/azure/setup_test.go b/plugin/azure/setup_test.go new file mode 100644 index 00000000000..c0b22d5814e --- /dev/null +++ b/plugin/azure/setup_test.go @@ -0,0 +1,72 @@ +package azure + +import ( + "testing" + + "github.com/caddyserver/caddy" +) + +func TestSetup(t *testing.T) { + tests := []struct { + body string + expectedError bool + }{ + {`azure`, false}, + {`azure :`, true}, + {`azure resource_set:zone`, false}, + {`azure resource_set:zone { + tenant +}`, true}, + {`azure resource_set:zone { + tenant +}`, true}, + {`azure resource_set:zone { + client +}`, true}, + {`azure resource_set:zone { + secret +}`, true}, + {`azure resource_set:zone { + subscription +}`, true}, + {`azure resource_set:zone { + upstream 10.0.0.1 +}`, true}, + + {`azure resource_set:zone { + upstream +}`, true}, + {`azure resource_set:zone { + foobar +}`, true}, + {`azure resource_set:zone { + tenant tenant_id + client client_id + secret client_secret + subscription subscription_id +}`, false}, + + {`azure resource_set:zone { + fallthrough +}`, false}, + {`azure resource_set:zone { + environment AZUREPUBLICCLOUD + }`, false}, + {`azure resource_set:zone resource_set:zone { + fallthrough + }`, true}, + {`azure resource_set:zone,zone2 { + fallthrough + }`, false}, + {`azure resource-set { + fallthrough + }`, true}, + } + + for i, test := range tests { + c := caddy.NewTestController("dns", test.body) + if _, _, _, err := parse(c); (err == nil) == test.expectedError { + t.Fatalf("Unexpected errors: %v in test: %d\n\t%s", err, i, test.body) + } + } +} From 1bb753c5b0bf8048f6cf704c92b493c535ef3b0c Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Fri, 9 Aug 2019 16:10:26 +0100 Subject: [PATCH 068/324] plugin/azure: clean up readme (#3102) document the environment option and some cleanups. Go over the code and fix/tweak random bits here and there. 
Condense a few lines here and there. Signed-off-by: Miek Gieben --- plugin/azure/README.md | 38 +++++++------ plugin/azure/azure.go | 124 +++++++++++++---------------------------- plugin/azure/setup.go | 36 ++++++------ 3 files changed, 80 insertions(+), 118 deletions(-) diff --git a/plugin/azure/README.md b/plugin/azure/README.md index 5c7be25a8c2..70cf9a15d73 100644 --- a/plugin/azure/README.md +++ b/plugin/azure/README.md @@ -6,9 +6,9 @@ ## Description -The azure plugin is useful for serving zones from Microsoft Azure DNS. -Thi *azure* plugin supports all the DNS records supported by Azure, viz. A, AAAA, CAA, CNAME, MX, NS, PTR, SOA, SRV, and TXT record types. For a non-existing resource record, zone's SOA response will returned. - +The azure plugin is useful for serving zones from Microsoft Azure DNS. The *azure* plugin supports +all the DNS records supported by Azure, viz. A, AAAA, CNAME, MX, NS, PTR, SOA, SRV, and TXT +record types. ## Syntax @@ -18,35 +18,39 @@ azure RESOURCE_GROUP:ZONE... { client CLIENT_ID secret CLIENT_SECRET subscription SUBSCRIPTION_ID + environment ENVIRONMENT + fallthrough [ZONES...] } ~~~ -* **`RESOURCE_GROUP`** The resource group to which the dns hosted zones belong on Azure +* **RESOURCE_GROUP:ZONE** is the resource group to which the hosted zones belongs on Azure, + and **ZONE** the zone that contains data. + +* **CLIENT_ID** and **CLIENT_SECRET** are the credentials for Azure, and `tenant` specifies the + **TENANT_ID** to be used. **SUBSCRIPTION_ID** is the subscription ID. All of these are needed + to access the data in Azure. -* **`ZONE`** the zone that contains the resource record sets to be - accessed. +* `environment` specifies the Azure **ENVIRONMENT**. * `fallthrough` If zone matches and no record can be generated, pass request to the next plugin. If **ZONES** is omitted, then fallthrough happens for all zones for which the plugin is - authoritative. 
If specific zones are listed (for example `in-addr.arpa` and `ip6.arpa`), then - only queries for those zones will be subject to fallthrough. - -* `environment` the azure environment to use. Defaults to `AzurePublicCloud`. Possible values: `AzureChinaCloud`, `AzureGermanCloud`, `AzurePublicCloud`, `AzureUSGovernmentCloud`. + authoritative. ## Examples -Enable the *azure* plugin with Azure credentials: +Enable the *azure* plugin with Azure credentials for the zone `example.org`: ~~~ txt -. { - azure resource_group_foo:foo.com { +example.org { + azure resource_group_foo:example.org { tenant 123abc-123abc-123abc-123abc - client 123abc-123abc-123abc-123abc - secret 123abc-123abc-123abc-123abc - subscription 123abc-123abc-123abc-123abc + client 123abc-123abc-123abc-234xyz + subscription 123abc-123abc-123abc-563abc + secret mysecret } } ~~~ ## Also See -- [Azure DNS Overview](https://docs.microsoft.com/en-us/azure/dns/dns-overview) + +The [Azure DNS Overview](https://docs.microsoft.com/en-us/azure/dns/dns-overview). diff --git a/plugin/azure/azure.go b/plugin/azure/azure.go index f3b2fad404e..38d9eecfabc 100644 --- a/plugin/azure/azure.go +++ b/plugin/azure/azure.go @@ -27,38 +27,39 @@ type zones map[string][]*zone // Azure is the core struct of the azure plugin. type Azure struct { - Next plugin.Handler - Fall fall.F zoneNames []string client azuredns.RecordSetsClient upstream *upstream.Upstream zMu sync.RWMutex zones zones + + Next plugin.Handler + Fall fall.F } // New validates the input DNS zones and initializes the Azure struct. 
-func New(ctx context.Context, dnsClient azuredns.RecordSetsClient, keys map[string][]string, up *upstream.Upstream) (*Azure, error) { +func New(ctx context.Context, dnsClient azuredns.RecordSetsClient, keys map[string][]string) (*Azure, error) { zones := make(map[string][]*zone, len(keys)) - zoneNames := make([]string, 0, len(keys)) - for resourceGroup, inputZoneNames := range keys { - for _, zoneName := range inputZoneNames { - _, err := dnsClient.ListAllByDNSZone(context.Background(), resourceGroup, zoneName, nil, "") - if err != nil { + names := make([]string, len(keys)) + + for resourceGroup, znames := range keys { + for _, name := range znames { + if _, err := dnsClient.ListAllByDNSZone(context.Background(), resourceGroup, name, nil, ""); err != nil { return nil, err } - // Normalizing zoneName to make it fqdn if required. - zoneNameFQDN := dns.Fqdn(zoneName) - if _, ok := zones[zoneNameFQDN]; !ok { - zoneNames = append(zoneNames, zoneNameFQDN) + + fqdn := dns.Fqdn(name) + if _, ok := zones[fqdn]; !ok { + names = append(names, fqdn) } - zones[zoneNameFQDN] = append(zones[zoneNameFQDN], &zone{id: resourceGroup, zone: zoneName, z: file.NewZone(zoneName, "")}) + zones[fqdn] = append(zones[fqdn], &zone{id: resourceGroup, zone: fqdn, z: file.NewZone(fqdn, "")}) } } return &Azure{ client: dnsClient, zones: zones, - zoneNames: zoneNames, - upstream: up, + zoneNames: names, + upstream: upstream.New(), }, nil } @@ -114,107 +115,67 @@ func updateZoneFromResourceSet(recordSet azuredns.RecordSetListResultPage, zName resultTTL := uint32(*(result.RecordSetProperties.TTL)) if result.RecordSetProperties.ARecords != nil { for _, A := range *(result.RecordSetProperties.ARecords) { - a := dns.A{ - Hdr: dns.RR_Header{ - Name: resultFqdn, - Rrtype: dns.TypeA, - Class: dns.ClassINET, - Ttl: resultTTL}, + a := &dns.A{Hdr: dns.RR_Header{Name: resultFqdn, Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: resultTTL}, A: net.ParseIP(*(A.Ipv4Address))} - newZ.Insert(&a) + newZ.Insert(a) } } 
if result.RecordSetProperties.AaaaRecords != nil { for _, AAAA := range *(result.RecordSetProperties.AaaaRecords) { - aaaa := dns.AAAA{ - Hdr: dns.RR_Header{ - Name: resultFqdn, - Rrtype: dns.TypeAAAA, - Class: dns.ClassINET, - Ttl: resultTTL}, + aaaa := &dns.AAAA{Hdr: dns.RR_Header{Name: resultFqdn, Rrtype: dns.TypeAAAA, Class: dns.ClassINET, Ttl: resultTTL}, AAAA: net.ParseIP(*(AAAA.Ipv6Address))} - newZ.Insert(&aaaa) + newZ.Insert(aaaa) } } if result.RecordSetProperties.MxRecords != nil { for _, MX := range *(result.RecordSetProperties.MxRecords) { - mx := dns.MX{ - Hdr: dns.RR_Header{ - Name: resultFqdn, - Rrtype: dns.TypeMX, - Class: dns.ClassINET, - Ttl: resultTTL}, + mx := &dns.MX{Hdr: dns.RR_Header{Name: resultFqdn, Rrtype: dns.TypeMX, Class: dns.ClassINET, Ttl: resultTTL}, Preference: uint16(*(MX.Preference)), Mx: dns.Fqdn(*(MX.Exchange))} - newZ.Insert(&mx) + newZ.Insert(mx) } } if result.RecordSetProperties.PtrRecords != nil { for _, PTR := range *(result.RecordSetProperties.PtrRecords) { - ptr := dns.PTR{ - Hdr: dns.RR_Header{ - Name: resultFqdn, - Rrtype: dns.TypePTR, - Class: dns.ClassINET, - Ttl: resultTTL}, + ptr := &dns.PTR{Hdr: dns.RR_Header{Name: resultFqdn, Rrtype: dns.TypePTR, Class: dns.ClassINET, Ttl: resultTTL}, Ptr: dns.Fqdn(*(PTR.Ptrdname))} - newZ.Insert(&ptr) + newZ.Insert(ptr) } } if result.RecordSetProperties.SrvRecords != nil { for _, SRV := range *(result.RecordSetProperties.SrvRecords) { - srv := dns.SRV{ - Hdr: dns.RR_Header{ - Name: resultFqdn, - Rrtype: dns.TypeSRV, - Class: dns.ClassINET, - Ttl: resultTTL}, + srv := &dns.SRV{Hdr: dns.RR_Header{Name: resultFqdn, Rrtype: dns.TypeSRV, Class: dns.ClassINET, Ttl: resultTTL}, Priority: uint16(*(SRV.Priority)), Weight: uint16(*(SRV.Weight)), Port: uint16(*(SRV.Port)), Target: dns.Fqdn(*(SRV.Target))} - newZ.Insert(&srv) + newZ.Insert(srv) } } if result.RecordSetProperties.TxtRecords != nil { for _, TXT := range *(result.RecordSetProperties.TxtRecords) { - txt := dns.TXT{ - Hdr: 
dns.RR_Header{ - Name: resultFqdn, - Rrtype: dns.TypeTXT, - Class: dns.ClassINET, - Ttl: resultTTL}, + txt := &dns.TXT{Hdr: dns.RR_Header{Name: resultFqdn, Rrtype: dns.TypeTXT, Class: dns.ClassINET, Ttl: resultTTL}, Txt: *(TXT.Value)} - newZ.Insert(&txt) + newZ.Insert(txt) } } if result.RecordSetProperties.NsRecords != nil { for _, NS := range *(result.RecordSetProperties.NsRecords) { - ns := dns.NS{ - Hdr: dns.RR_Header{ - Name: resultFqdn, - Rrtype: dns.TypeNS, - Class: dns.ClassINET, - Ttl: resultTTL}, + ns := &dns.NS{Hdr: dns.RR_Header{Name: resultFqdn, Rrtype: dns.TypeNS, Class: dns.ClassINET, Ttl: resultTTL}, Ns: *(NS.Nsdname)} - newZ.Insert(&ns) + newZ.Insert(ns) } } if result.RecordSetProperties.SoaRecord != nil { SOA := result.RecordSetProperties.SoaRecord - soa := dns.SOA{ - Hdr: dns.RR_Header{ - Name: resultFqdn, - Rrtype: dns.TypeSOA, - Class: dns.ClassINET, - Ttl: resultTTL}, + soa := &dns.SOA{Hdr: dns.RR_Header{Name: resultFqdn, Rrtype: dns.TypeSOA, Class: dns.ClassINET, Ttl: resultTTL}, Minttl: uint32(*(SOA.MinimumTTL)), Expire: uint32(*(SOA.ExpireTime)), Retry: uint32(*(SOA.RetryTime)), @@ -222,19 +183,14 @@ func updateZoneFromResourceSet(recordSet azuredns.RecordSetListResultPage, zName Serial: uint32(*(SOA.SerialNumber)), Mbox: dns.Fqdn(*(SOA.Email)), Ns: *(SOA.Host)} - newZ.Insert(&soa) + newZ.Insert(soa) } if result.RecordSetProperties.CnameRecord != nil { CNAME := result.RecordSetProperties.CnameRecord.Cname - cname := dns.CNAME{ - Hdr: dns.RR_Header{ - Name: resultFqdn, - Rrtype: dns.TypeCNAME, - Class: dns.ClassINET, - Ttl: resultTTL}, + cname := &dns.CNAME{Hdr: dns.RR_Header{Name: resultFqdn, Rrtype: dns.TypeCNAME, Class: dns.ClassINET, Ttl: resultTTL}, Target: dns.Fqdn(*CNAME)} - newZ.Insert(&cname) + newZ.Insert(cname) } } return newZ @@ -245,13 +201,13 @@ func (h *Azure) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) state := request.Request{W: w, Req: r} qname := state.Name() - zName := 
plugin.Zones(h.zoneNames).Matches(qname) - if zName == "" { + zone := plugin.Zones(h.zoneNames).Matches(qname) + if zone == "" { return plugin.NextOrFailure(h.Name(), h.Next, ctx, w, r) } - z, ok := h.zones[zName] // ok true if we are authoritive for the zone. - if !ok || z == nil { + zones, ok := h.zones[zone] // ok true if we are authoritive for the zone. + if !ok || zones == nil { return dns.RcodeServerFailure, nil } @@ -259,9 +215,9 @@ func (h *Azure) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) m.SetReply(r) m.Authoritative = true var result file.Result - for _, hostedZone := range z { + for _, z := range zones { h.zMu.RLock() - m.Answer, m.Ns, m.Extra, result = hostedZone.z.Lookup(ctx, state, qname) + m.Answer, m.Ns, m.Extra, result = z.z.Lookup(ctx, state, qname) h.zMu.RUnlock() // record type exists for this name (NODATA). diff --git a/plugin/azure/setup.go b/plugin/azure/setup.go index 1ac0cc72349..ef5711c00e8 100644 --- a/plugin/azure/setup.go +++ b/plugin/azure/setup.go @@ -8,7 +8,6 @@ import ( "github.com/coredns/coredns/plugin" "github.com/coredns/coredns/plugin/pkg/fall" clog "github.com/coredns/coredns/plugin/pkg/log" - "github.com/coredns/coredns/plugin/pkg/upstream" azuredns "github.com/Azure/azure-sdk-for-go/profiles/latest/dns/mgmt/dns" azurerest "github.com/Azure/go-autorest/autorest/azure" @@ -31,19 +30,21 @@ func setup(c *caddy.Controller) error { return plugin.Error("azure", err) } ctx := context.Background() + dnsClient := azuredns.NewRecordSetsClient(env.Values[auth.SubscriptionID]) - dnsClient.Authorizer, err = env.GetAuthorizer() - if err != nil { - return c.Errf("failed to create azure plugin: %v", err) + if dnsClient.Authorizer, err = env.GetAuthorizer(); err != nil { + return plugin.Error("azure", err) } - h, err := New(ctx, dnsClient, keys, upstream.New()) + + h, err := New(ctx, dnsClient, keys) if err != nil { - return c.Errf("failed to initialize azure plugin: %v", 
err) + return plugin.Error("azure", err) } h.Fall = fall if err := h.Run(ctx); err != nil { - return c.Errf("failed to run azure plugin: %v", err) + return plugin.Error("azure", err) } + dnsserver.GetConfig(c).AddPlugin(func(next plugin.Handler) plugin.Handler { h.Next = next return h @@ -54,26 +55,25 @@ func setup(c *caddy.Controller) error { func parse(c *caddy.Controller) (auth.EnvironmentSettings, map[string][]string, fall.F, error) { resourceGroupMapping := map[string][]string{} resourceGroupSet := map[string]struct{}{} - var err error - var fall fall.F - azureEnv := azurerest.PublicCloud env := auth.EnvironmentSettings{Values: map[string]string{}} + var fall fall.F + for c.Next() { args := c.RemainingArgs() for i := 0; i < len(args); i++ { parts := strings.SplitN(args[i], ":", 2) if len(parts) != 2 { - return env, resourceGroupMapping, fall, c.Errf("invalid resource group / zone '%s'", args[i]) + return env, resourceGroupMapping, fall, c.Errf("invalid resource group/zone: %q", args[i]) } resourceGroup, zoneName := parts[0], parts[1] if resourceGroup == "" || zoneName == "" { - return env, resourceGroupMapping, fall, c.Errf("invalid resource group / zone '%s'", args[i]) + return env, resourceGroupMapping, fall, c.Errf("invalid resource group/zone: %q", args[i]) } if _, ok := resourceGroupSet[args[i]]; ok { - return env, resourceGroupMapping, fall, c.Errf("conflict zone '%s'", args[i]) + return env, resourceGroupMapping, fall, c.Errf("conflicting zone: %q", args[i]) } resourceGroupSet[args[i]] = struct{}{} @@ -106,18 +106,20 @@ func parse(c *caddy.Controller) (auth.EnvironmentSettings, map[string][]string, return env, resourceGroupMapping, fall, c.ArgErr() } env.Values[auth.ClientSecret] = c.Val() - azureEnv, err = azurerest.EnvironmentFromName(c.Val()) - if err != nil { - return env, resourceGroupMapping, fall, c.Errf("cannot set azure environment: %s", err.Error()) + var err error + if azureEnv, err = azurerest.EnvironmentFromName(c.Val()); err != nil { + 
return env, resourceGroupMapping, fall, c.Errf("cannot set azure environment: %q", err.Error()) } case "fallthrough": fall.SetZonesFromArgs(c.RemainingArgs()) default: - return env, resourceGroupMapping, fall, c.Errf("unknown property '%s'", c.Val()) + return env, resourceGroupMapping, fall, c.Errf("unknown property: %q", c.Val()) } } } + env.Values[auth.Resource] = azureEnv.ResourceManagerEndpoint env.Environment = azureEnv + return env, resourceGroupMapping, fall, nil } From 6a6e9a9b33731656b51655072951649d9e716613 Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Fri, 9 Aug 2019 16:40:30 +0100 Subject: [PATCH 069/324] mechanical: run: go gen and make -f Makefile.doc (#3104) no manual updates; just generated stuff. Signed-off-by: Miek Gieben --- man/coredns-azure.7 | 70 ++++++++++++++++++++++++++++++++++++++++ man/coredns-kubernetes.7 | 2 +- man/coredns-route53.7 | 27 +++++++++++++++- plugin/chaos/zowners.go | 2 +- 4 files changed, 98 insertions(+), 3 deletions(-) create mode 100644 man/coredns-azure.7 diff --git a/man/coredns-azure.7 b/man/coredns-azure.7 new file mode 100644 index 00000000000..2a4bdcd0129 --- /dev/null +++ b/man/coredns-azure.7 @@ -0,0 +1,70 @@ +.\" Generated by Mmark Markdown Processer - mmark.nl +.TH "COREDNS-AZURE" 7 "August 2019" "CoreDNS" "CoreDNS Plugins" + +.SH "NAME" +.PP +\fIazure\fP - enables serving zone data from Microsoft Azure DNS service. + +.SH "DESCRIPTION" +.PP +The azure plugin is useful for serving zones from Microsoft Azure DNS. The \fIazure\fP plugin supports +all the DNS records supported by Azure, viz. A, AAAA, CNAME, MX, NS, PTR, SOA, SRV, and TXT +record types. + +.SH "SYNTAX" +.PP +.RS + +.nf +azure RESOURCE\_GROUP:ZONE... { + tenant TENANT\_ID + client CLIENT\_ID + secret CLIENT\_SECRET + subscription SUBSCRIPTION\_ID + environment ENVIRONMENT + fallthrough [ZONES...] 
+} + +.fi +.RE + +.IP \(bu 4 +\fBRESOURCE_GROUP:ZONE\fP is the resource group to which the hosted zones belongs on Azure, +and \fBZONE\fP the zone that contains data. +.IP \(bu 4 +\fBCLIENT_ID\fP and \fBCLIENT_SECRET\fP are the credentials for Azure, and \fB\fCtenant\fR specifies the +\fBTENANT_ID\fP to be used. \fBSUBSCRIPTION_ID\fP is the subscription ID. All of these are needed +to access the data in Azure. +.IP \(bu 4 +\fB\fCenvironment\fR specifies the Azure \fBENVIRONMENT\fP. +.IP \(bu 4 +\fB\fCfallthrough\fR If zone matches and no record can be generated, pass request to the next plugin. +If \fBZONES\fP is omitted, then fallthrough happens for all zones for which the plugin is +authoritative. + + +.SH "EXAMPLES" +.PP +Enable the \fIazure\fP plugin with Azure credentials for the zone \fB\fCexample.org\fR: + +.PP +.RS + +.nf +example.org { + azure resource\_group\_foo:example.org { + tenant 123abc\-123abc\-123abc\-123abc + client 123abc\-123abc\-123abc\-234xyz + subscription 123abc\-123abc\-123abc\-563abc + secret mysecret + } +} + +.fi +.RE + +.SH "ALSO SEE" +.PP +The Azure DNS Overview +\[la]https://docs.microsoft.com/en-us/azure/dns/dns-overview\[ra]. + diff --git a/man/coredns-kubernetes.7 b/man/coredns-kubernetes.7 index 7b21e961cbd..ddc7cb021b1 100644 --- a/man/coredns-kubernetes.7 +++ b/man/coredns-kubernetes.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-KUBERNETES" 7 "July 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-KUBERNETES" 7 "August 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-route53.7 b/man/coredns-route53.7 index 943f1433f9e..e28a92f42ed 100644 --- a/man/coredns-route53.7 +++ b/man/coredns-route53.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-ROUTE53" 7 "July 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-ROUTE53" 7 "August 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP @@ -22,6 +22,7 @@ route53 [ZONE:HOSTED\_ZONE\_ID...] 
{ aws\_access\_key [AWS\_ACCESS\_KEY\_ID AWS\_SECRET\_ACCESS\_KEY] credentials PROFILE [FILENAME] fallthrough [ZONES...] + refresh DURATION } .fi @@ -54,6 +55,14 @@ only queries for those zones will be subject to fallthrough. .IP \(bu 4 \fBZONES\fP zones it should be authoritative for. If empty, the zones from the configuration block. +.IP \(bu 4 +\fB\fCrefresh\fR can be used to control how long between record retrievals from Route 53. It requires +a duration string as a parameter to specify the duration between update cycles. Each update +cycle may result in many AWS API calls depending on how many domains use this plugin and how +many records are in each. Adjusting the update frequency may help reduce the potential of API +rate-limiting imposed by AWS. +.IP \(bu 4 +\fBDURATION\fP A duration string. Defaults to \fB\fC1m\fR. If units are unspecified, seconds are assumed. .SH "EXAMPLES" @@ -118,3 +127,19 @@ Enable route53 with multiple hosted zones with the same domain: .fi .RE +.PP +Enable route53 and refresh records every 3 minutes + +.PP +.RS + +.nf +\&. { + route53 example.org.:Z1Z2Z3Z4DZ5Z6Z7 { + refresh 3m + } +} + +.fi +.RE + diff --git a/plugin/chaos/zowners.go b/plugin/chaos/zowners.go index d747069dc7d..41b8f690eec 100644 --- a/plugin/chaos/zowners.go +++ b/plugin/chaos/zowners.go @@ -1,4 +1,4 @@ package chaos // Owners are all GitHub handlers of all maintainers. 
-var Owners = []string{"bradbeam", "chrisohaver", "dilyevsky", "ekleiner", "fastest963", "greenpau", "grobie", "inigohu", "isolus", "johnbelamaric", "miekg", "nchrisdk", "nitisht", "pmoroney", "rajansandeep", "rdrozhdzh", "rtreffer", "stp-ip", "superq", "varyoo", "yongtang"} +var Owners = []string{"bradbeam", "chrisohaver", "darshanime", "dilyevsky", "ekleiner", "fastest963", "greenpau", "grobie", "inigohu", "isolus", "johnbelamaric", "miekg", "nchrisdk", "nitisht", "pmoroney", "rajansandeep", "rdrozhdzh", "rtreffer", "stp-ip", "superq", "varyoo", "yongtang"} From 3a59c833a09eafd8e67f6ac1686b30193b6220c0 Mon Sep 17 00:00:00 2001 From: Chris O'Haver Date: Fri, 9 Aug 2019 17:14:48 -0400 Subject: [PATCH 070/324] plugin/kubernetes: Don't do a zone transfer for NS requests (#3098) * fix switch order * remove fallthough * add test * fix test * distingush nxdomain/nodata for at least first subdomain of zone * restore fallthough; reorder switch cases --- plugin/kubernetes/handler.go | 4 ++-- plugin/kubernetes/handler_test.go | 8 ++++++++ 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/plugin/kubernetes/handler.go b/plugin/kubernetes/handler.go index 324e08da69c..a7df99d9077 100644 --- a/plugin/kubernetes/handler.go +++ b/plugin/kubernetes/handler.go @@ -28,6 +28,8 @@ func (k Kubernetes) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.M ) switch state.QType() { + case dns.TypeAXFR, dns.TypeIXFR: + k.Transfer(ctx, state) case dns.TypeA: records, err = plugin.A(ctx, &k, zone, state, nil, plugin.Options{}) case dns.TypeAAAA: @@ -50,8 +52,6 @@ func (k Kubernetes) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.M break } fallthrough - case dns.TypeAXFR, dns.TypeIXFR: - k.Transfer(ctx, state) default: // Do a fake A lookup, so we can distinguish between NODATA and NXDOMAIN _, err = plugin.A(ctx, &k, zone, state, nil, plugin.Options{}) diff --git a/plugin/kubernetes/handler_test.go b/plugin/kubernetes/handler_test.go index 
0efd03c0767..a517e8b8919 100644 --- a/plugin/kubernetes/handler_test.go +++ b/plugin/kubernetes/handler_test.go @@ -341,6 +341,14 @@ var dnsTestCases = []test.Case{ test.SOA("cluster.local. 5 IN SOA ns.dns.cluster.local. hostmaster.cluster.local. 1499347823 7200 1800 86400 5"), }, }, + // NS query for qname != zone (existing domain) + { + Qname: "svc.cluster.local.", Qtype: dns.TypeNS, + Rcode: dns.RcodeSuccess, + Ns: []dns.RR{ + test.SOA("cluster.local. 5 IN SOA ns.dns.cluster.local. hostmaster.cluster.local. 1499347823 7200 1800 86400 5"), + }, + }, } func TestServeDNS(t *testing.T) { From ebe6a41e5083ff5d4402c235955e9919e93560fe Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Sat, 10 Aug 2019 07:19:59 +0000 Subject: [PATCH 071/324] deps: up klog to 0.4.0 (#3109) New version to get the fix for: https://github.com/kubernetes/klog/pull/83 which can fix: https://github.com/coredns/coredns/pull/3055 Signed-off-by: Miek Gieben --- go.mod | 2 +- go.sum | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index 35ba659df58..d8f965a00b6 100644 --- a/go.mod +++ b/go.mod @@ -63,7 +63,7 @@ require ( k8s.io/api v0.0.0-20190313235455-40a48860b5ab k8s.io/apimachinery v0.0.0-20190313205120-d7deff9243b1 k8s.io/client-go v11.0.0+incompatible - k8s.io/klog v0.3.3 + k8s.io/klog v0.4.0 k8s.io/kube-openapi v0.0.0-20190306001800-15615b16d372 // indirect k8s.io/utils v0.0.0-20190529001817-6999998975a7 // indirect sigs.k8s.io/yaml v1.1.0 // indirect diff --git a/go.sum b/go.sum index 4ae5cff1689..cea2e4fa4d0 100644 --- a/go.sum +++ b/go.sum @@ -105,6 +105,7 @@ github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2 github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0 h1:MP4Eh7ZCb31lleYCFuwm0oe4/YGak+5l1vA2NOE80nA= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= 
+github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= @@ -438,6 +439,8 @@ k8s.io/client-go v11.0.0+incompatible/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyod k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.3.3 h1:niceAagH1tzskmaie/icWd7ci1wbG7Bf2c6YGcQv+3c= k8s.io/klog v0.3.3/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v0.4.0 h1:lCJCxf/LIowc2IGS9TPjWDyXY4nOmdGdfcwwDQCOURQ= +k8s.io/klog v0.4.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/kube-openapi v0.0.0-20190306001800-15615b16d372 h1:zia7dTzfEtdiSUxi9cXUDsSQH2xE6igmGKyFn2on/9A= k8s.io/kube-openapi v0.0.0-20190306001800-15615b16d372/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= k8s.io/utils v0.0.0-20190529001817-6999998975a7 h1:5UOdmwfY+7XsXvo26XeCDu9GhHJPkO1z8Mcz5AHMnOE= From 1ef24a881391047d55628fc32b058a60552c9ede Mon Sep 17 00:00:00 2001 From: Andrey Meshkov Date: Mon, 12 Aug 2019 21:24:16 +0300 Subject: [PATCH 072/324] Fix handling truncated responses in forward (#3110) * Fix handling truncated responses in forward * Removed error check after proxy.Connect See https://github.com/coredns/coredns/pull/3110/files#r312942826 --- plugin/forward/forward.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/plugin/forward/forward.go b/plugin/forward/forward.go index da2e175fe35..9d37780da20 100644 --- a/plugin/forward/forward.go +++ b/plugin/forward/forward.go @@ -109,9 +109,6 @@ func (f *Forward) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg opts := f.opts for { ret, err = proxy.Connect(ctx, state, opts) - if err == nil { - break - } if err == ErrCachedClosed { // Remote side 
closed conn, can only happen with TCP. continue } From 6466d010557383620b5887f2754560773ca55088 Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Mon, 12 Aug 2019 18:24:57 +0000 Subject: [PATCH 073/324] doc: formatting and point to plugin.md (#3107) * doc: formatting and point to plugin.md Slight tweaks in CONTRIBUTING.md and point to plugin.md which also has a bunch of info. Signed-off-by: Miek Gieben * Update plugin.md Co-Authored-By: Chris O'Haver * Add minimize to configuration knobs Signed-off-by: Miek Gieben * more Signed-off-by: Miek Gieben --- CONTRIBUTING.md | 25 ++++++++++++++++--------- plugin.md | 10 ++++++++++ 2 files changed, 26 insertions(+), 9 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 8774238a07b..d3846f664f2 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -29,14 +29,18 @@ tests to assert your change is working properly and is thoroughly covered. First, please [search](https://github.com/coredns/coredns/search?q=&type=Issues&utf8=%E2%9C%93) with a variety of keywords to ensure your suggestion/proposal is new. -Please also check for existing pull requests to see if someone is already working on this. We want to avoid duplication of effort. +Please also check for existing pull requests to see if someone is already working on this. We want +to avoid duplication of effort. -If the proposal is new and no one has opened pull request yet, you may open either an issue or a pull request for discussion and feedback. +If the proposal is new and no one has opened pull request yet, you may open either an issue or a +pull request for discussion and feedback. If you are going to spend significant time implementing code for a pull request, best to open an issue first and "claim" it and get feedback before you invest a lot of time. 
-**If someone already opened a pull request, but you think the pull request has stalled and you would like to open another pull request for the same or similar feature, get some of the maintainers (see [OWNERS](OWNERS)) involved to resolve the situation and move things forward.** +**If someone already opened a pull request, but you think the pull request has stalled and you would +like to open another pull request for the same or similar feature, get some of the maintainers (see +[OWNERS](OWNERS)) involved to resolve the situation and move things forward.** If possible make a pull request as small as possible, or submit multiple pull request to complete a feature. Smaller means: easier to understand and review. This in turn means things can be merged @@ -44,13 +48,16 @@ faster. ## New Plugins -A new plugin is (usually) about 1000 lines of Go. This includes tests and some plugin boiler -plate. This is a considerable amount of code and will take time to review. To prevent too much back and -forth it is advisable to start with the plugin's `README.md`; This will be its main documentation and will help -nail down the correct name of the plugin and its various config options. +A new plugin is (usually) about 1000 lines of Go. This includes tests and some plugin boiler plate. +This is a considerable amount of code and will take time to review. To prevent too much back and +forth it is advisable to start with the plugin's `README.md`; This will be its main documentation +and will help nail down the correct name of the plugin and its various config options. -From there it can work its way through the rest (`setup.go`, the `ServeDNS` handler function, -etc.). Doing this will help the reviewers, as each chunk of code is relatively small. +From there it can work its way through the rest (`setup.go`, the `ServeDNS` handler function, etc.). +Doing this will help the reviewers, as each chunk of code is relatively small. 
+ +Also read [plugin.md](https://raw.githubusercontent.com/coredns/coredns/master/plugin.md) for +advice on how to write a plugin. ## Updating Dependencies diff --git a/plugin.md b/plugin.md index 7b7655022b9..f00cfe712ab 100644 --- a/plugin.md +++ b/plugin.md @@ -117,6 +117,16 @@ The `fallthrough` directive should optionally accept a list of zones. Only queri in one of those zones should be allowed to fallthrough. See `plugin/pkg/fallthrough` for the implementation. +## General Guidelines + +Some general guidelines: + +* logging time duration should be done in seconds (call the `Seconds()` method on any duration). +* keep logging to a minimum. +* call the main config parse function just `parse`. +* try to minimize the number of knobs in the configuration. +* use `plugin.Error()` to wrap errors returned from the `setup` function. + ## Qualifying for Main Repo Plugins for CoreDNS can live out-of-tree, `plugin.cfg` defaults to CoreDNS' repo but other From cdbe55e551e991803f2cb480f4d7407b17a62707 Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 12 Aug 2019 18:26:01 +0000 Subject: [PATCH 074/324] build(deps): bump github.com/prometheus/client_golang (#3111) Bumps [github.com/prometheus/client_golang](https://github.com/prometheus/client_golang) from 1.0.0 to 1.1.0. 
- [Release notes](https://github.com/prometheus/client_golang/releases) - [Changelog](https://github.com/prometheus/client_golang/blob/master/CHANGELOG.md) - [Commits](https://github.com/prometheus/client_golang/compare/v1.0.0...v1.1.0) Signed-off-by: dependabot-preview[bot] --- go.mod | 2 +- go.sum | 9 +++++++++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index d8f965a00b6..02c9613f040 100644 --- a/go.mod +++ b/go.mod @@ -39,7 +39,7 @@ require ( github.com/openzipkin/zipkin-go-opentracing v0.3.5 // indirect github.com/philhofer/fwd v1.0.0 // indirect github.com/pkg/errors v0.8.1 // indirect - github.com/prometheus/client_golang v1.0.0 + github.com/prometheus/client_golang v1.1.0 github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 github.com/prometheus/common v0.6.0 github.com/sirupsen/logrus v1.4.2 // indirect diff --git a/go.sum b/go.sum index cea2e4fa4d0..6c5d728a1c0 100644 --- a/go.sum +++ b/go.sum @@ -54,6 +54,7 @@ github.com/aws/aws-sdk-go v1.21.2/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bifurcation/mint v0.0.0-20180715133206-93c51c6ce115/go.mod h1:zVt7zX3K/aDCk9Tj+VM7YymsX66ERvzCJzw8rFCX2JU= github.com/caddyserver/caddy v1.0.1 h1:oor6ep+8NoJOabpFXhvjqjfeldtw1XSzfISVrbfqTKo= github.com/caddyserver/caddy v1.0.1/go.mod h1:G+ouvOY32gENkJC+jhgl62TyhvqEsFaDiZ4uw0RzP1E= @@ -132,6 +133,7 @@ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5a 
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf h1:+RRA9JqSOZFfKrOeqr2z77+8R2RKyh8PG66dcu1V0ck= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= @@ -173,6 +175,7 @@ github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0 github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= @@ -203,8 +206,10 @@ github.com/miekg/dns v1.1.15 h1:CSSIDtllwGLMoA6zjdKnaE6Tx6eVUxQ29LUgGetiDCI= github.com/miekg/dns v1.1.15/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod 
h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= @@ -239,6 +244,8 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= github.com/prometheus/client_golang v1.0.0 h1:vrDKnkGzuGvhNAL56c7DBz29ZL+KxnoR0x7enabFceM= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.1.0 h1:BQ53HtBmfOitExawJ6LokA4x8ov/z0SYYb0+HxJfRI8= +github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE= @@ -252,6 +259,7 @@ github.com/prometheus/procfs 
v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhDYGoxY8uLVpewe1GDZ2vu2Tr/vTdVAkFQ= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= @@ -355,6 +363,7 @@ golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb h1:fgwFCsaw9buMuxNd6+DQfAuSFqbNiQZpcgJQAgJsK6k= golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= From a5de7932c5d4b480c15b4e4d8a903af373c4ec81 Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 12 Aug 2019 18:26:37 +0000 Subject: [PATCH 075/324] build(deps): bump github.com/Azure/go-autorest/autorest (#3112) Bumps 
[github.com/Azure/go-autorest/autorest](https://github.com/Azure/go-autorest) from 0.5.0 to 0.8.0. - [Release notes](https://github.com/Azure/go-autorest/releases) - [Changelog](https://github.com/Azure/go-autorest/blob/master/CHANGELOG.md) - [Commits](https://github.com/Azure/go-autorest/compare/autorest/v0.5.0...autorest/v0.8.0) Signed-off-by: dependabot-preview[bot] --- go.mod | 2 +- go.sum | 40 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 41 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index 02c9613f040..d6b614a9d2e 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ go 1.12 require ( cloud.google.com/go v0.41.0 // indirect github.com/Azure/azure-sdk-for-go v31.1.0+incompatible - github.com/Azure/go-autorest/autorest v0.5.0 + github.com/Azure/go-autorest/autorest v0.8.0 github.com/Azure/go-autorest/autorest/azure/auth v0.1.0 github.com/Azure/go-autorest/autorest/to v0.2.0 // indirect github.com/Shopify/sarama v1.21.0 // indirect diff --git a/go.sum b/go.sum index 6c5d728a1c0..612653c4ac6 100644 --- a/go.sum +++ b/go.sum @@ -3,16 +3,23 @@ cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.41.0 h1:NFvqUTDnSNYPX5oReekmB+D+90jrJIcVImxQ3qrBVgM= cloud.google.com/go v0.41.0/go.mod h1:OauMR7DV8fzvZIl2qg6rkaIhD/vmgk4iwEw/h6ercmg= +contrib.go.opencensus.io/exporter/ocagent v0.4.6/go.mod h1:YuG83h+XWwqWjvCqn7vK4KSyLKhThY3+gNGQ37iS2V0= contrib.go.opencensus.io/exporter/ocagent v0.4.12 h1:jGFvw3l57ViIVEPKKEUXPcLYIXJmQxLUh6ey1eJhwyc= contrib.go.opencensus.io/exporter/ocagent v0.4.12/go.mod h1:450APlNTSR6FrvC3CTRqYosuDstRB9un7SOx2k/9ckA= +git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= +git.apache.org/thrift.git v0.12.0/go.mod 
h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= github.com/Azure/azure-sdk-for-go v31.1.0+incompatible h1:5SzgnfAvUBdBwNTN23WLfZoCt/rGhLvd7QdCAaFXgY4= github.com/Azure/azure-sdk-for-go v31.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/go-autorest/autorest v0.1.0/go.mod h1:AKyIcETwSUFxIcs/Wnq/C+kwCtlEYGUVd7FPNb2slmg= github.com/Azure/go-autorest/autorest v0.5.0 h1:Mlm9qy2fpQ9MvfyI41G2Zf5B4CsgjjNbLOWszfK6KrY= github.com/Azure/go-autorest/autorest v0.5.0/go.mod h1:9HLKlQjVBH6U3oDfsXOeVc56THsLPw1L03yban4xThw= +github.com/Azure/go-autorest/autorest v0.8.0 h1:hdhcXqJ/T98aigNKEs1LjXlGbxWGSIP0rA/GMc15UPA= +github.com/Azure/go-autorest/autorest v0.8.0/go.mod h1:io2I5Mipuszn4buXaxt/RzC43Yf6aBtkgyUFF2HBdLY= github.com/Azure/go-autorest/autorest/adal v0.1.0/go.mod h1:MeS4XhScH55IST095THyTxElntu7WqB7pNbZo8Q5G3E= github.com/Azure/go-autorest/autorest/adal v0.2.0 h1:7IBDu1jgh+ADHXnEYExkV9RE/ztOOlxdACkkPRthGKw= github.com/Azure/go-autorest/autorest/adal v0.2.0/go.mod h1:MeS4XhScH55IST095THyTxElntu7WqB7pNbZo8Q5G3E= +github.com/Azure/go-autorest/autorest/adal v0.4.0 h1:e4yveyEm5WvJDtmxJCY8fW1d3MY7vlQw/dVgL7kEmAg= +github.com/Azure/go-autorest/autorest/adal v0.4.0/go.mod h1:n1iM1u+ZMYni6IIY+FB82IT/DbSdC3T3gO1qFQ/3jEw= github.com/Azure/go-autorest/autorest/azure/auth v0.1.0 h1:YgO/vSnJEc76NLw2ecIXvXa8bDWiqf1pOJzARAoZsYU= github.com/Azure/go-autorest/autorest/azure/auth v0.1.0/go.mod h1:Gf7/i2FUpyb/sGBLIFxTBzrNzBo7aPXXE3ZVeDRwdpM= github.com/Azure/go-autorest/autorest/azure/cli v0.1.0 h1:YTtBrcb6mhA+PoSW8WxFDoIIyjp13XqJeX80ssQtri4= @@ -20,12 +27,15 @@ github.com/Azure/go-autorest/autorest/azure/cli v0.1.0/go.mod h1:Dk8CUAt/b/Pzkfe github.com/Azure/go-autorest/autorest/date v0.1.0 h1:YGrhWfrgtFs84+h0o46rJrlmsZtyZRg470CqAXTZaGM= github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod 
h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/to v0.2.0 h1:nQOZzFCudTh+TvquAtCRjM01VEYx85e9qbwt5ncW4L8= github.com/Azure/go-autorest/autorest/to v0.2.0/go.mod h1:GunWKJp1AEqgMaGLV+iocmRAJWqST1wQYhyyjXJ3SJc= github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY= github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= github.com/Azure/go-autorest/tracing v0.1.0 h1:TRBxC5Pj/fIuh4Qob0ZpkggbfT8RC0SubHbpV3p4/Vc= github.com/Azure/go-autorest/tracing v0.1.0/go.mod h1:ROEEAFwXycQw7Sn3DXNtEedEvdeRAgDr0izn4z5Ij88= +github.com/Azure/go-autorest/tracing v0.4.0 h1:r5D+n0u8XGFQIpIYk0xlKLDgiArnnIsfUjWANW5Wto0= +github.com/Azure/go-autorest/tracing v0.4.0/go.mod h1:sVZ/n8H0f4naUjHNvSe2qjNiC3oV6+8CCqU9mhEvav8= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DataDog/zstd v1.3.5 h1:DtpNbljikUepEPD16hD4LvIcmhnhdLTiW/5pHgbmp14= @@ -60,6 +70,7 @@ github.com/caddyserver/caddy v1.0.1 h1:oor6ep+8NoJOabpFXhvjqjfeldtw1XSzfISVrbfqT github.com/caddyserver/caddy v1.0.1/go.mod h1:G+ouvOY32gENkJC+jhgl62TyhvqEsFaDiZ4uw0RzP1E= github.com/cenkalti/backoff v2.1.1+incompatible h1:tKJnvO2kl0zmb/jA5UKAt4VoEVw1qxKWjE/Bpp46npY= github.com/cenkalti/backoff v2.1.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/census-instrumentation/opencensus-proto v0.1.0-0.20181214143942-ba49f56771b8/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= 
github.com/census-instrumentation/opencensus-proto v0.2.0 h1:LzQXZOgg4CQfE6bFvXGM30YZL1WW/M337pXml+GrcZ4= github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cheekybits/genny v0.0.0-20170328200008-9127e812e1e9/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ= @@ -116,6 +127,7 @@ github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekf github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef h1:veQD95Isof8w9/WXiA+pa3tz3fJXkt5B7QaRBrM62gk= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= @@ -153,10 +165,14 @@ github.com/grpc-ecosystem/go-grpc-middleware v1.0.0 h1:Iju5GlWwrvL6UBg4zJJt3btmo github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= +github.com/grpc-ecosystem/grpc-gateway v1.6.2/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= github.com/grpc-ecosystem/grpc-gateway v1.8.3 
h1:wZ6biDjnBgIOf5t+r8dYsoGWH1Zl2Ps32OxD80Odbk8= github.com/grpc-ecosystem/grpc-gateway v1.8.3/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.8.5 h1:2+KSC78XiO6Qy0hIjfc1OD9H+hsaJdJlb8Kqsd41CTE= github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.9.5 h1:UImYN5qQ8tuGpGE16ZmjvcTtTw24zw1QAp/SlnNrZhI= +github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 h1:MJG/KsmcqMwFAkh8mTnAwhyKoB+sTAnY4CACC110tbU= github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= @@ -228,6 +244,8 @@ github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsq github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/openzipkin-contrib/zipkin-go-opentracing v0.3.5 h1:82Tnq9OJpn+h5xgGpss5/mOv3KXdjtkdorFSOUusjM8= github.com/openzipkin-contrib/zipkin-go-opentracing v0.3.5/go.mod h1:uVHyebswE1cCXr2A73cRM2frx5ld1RJUCJkFNZ90ZiI= +github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= +github.com/openzipkin/zipkin-go v0.1.3/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= github.com/openzipkin/zipkin-go-opentracing v0.3.5 h1:nZPvd2EmRKP+NzFdSuxZF/FG4Y4W2gn6ugXliTAu9o0= github.com/openzipkin/zipkin-go-opentracing v0.3.5/go.mod h1:js2AbwmHW0YD9DwIw2JhQWmbfFi/UnWyYwdVhqbCDOE= @@ -240,6 +258,7 @@ github.com/pkg/errors v0.8.1 
h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= github.com/prometheus/client_golang v1.0.0 h1:vrDKnkGzuGvhNAL56c7DBz29ZL+KxnoR0x7enabFceM= @@ -250,11 +269,13 @@ github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1: github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1 h1:K0MGApIoQvMw27RTdJkPbr3JZ7DNbtxQNyi5STVM6Kw= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.6.0 h1:kRhiuYSXR3+uv2IbVbZhUxK5zVD/2pp3Gd2PpvPkpEo= github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= +github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod 
h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs= @@ -285,6 +306,9 @@ github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5 github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= go.etcd.io/bbolt v1.3.2 h1:Z/90sZLPOeCy2PwprqkFa25PdkusRzaj9P8zm/KNyvk= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.opencensus.io v0.18.1-0.20181204023538-aab39bd6a98b/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= +go.opencensus.io v0.19.1/go.mod h1:gug0GbSHa8Pafr0d2urOSgoXHZ6x/RUlaiT0d9pqb4A= +go.opencensus.io v0.19.2/go.mod h1:NO/8qkisMZLZ1FCsKNqtJPwc8/TaclWyY0B6wcYNg9M= go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= @@ -308,7 +332,9 @@ golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8U golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20181217174547-8f45f776aaf1/go.mod 
h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -317,6 +343,7 @@ golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -334,6 +361,7 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190628185345-da137c7871d7 h1:rTIdg5QFRR7XCaK4LCjBiPbx8j4DQRpdYMnGn/bJUEU= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= golang.org/x/oauth2 
v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -349,6 +377,7 @@ golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181218192612-074acd46bca6/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190124100055-b90733256f2e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -374,6 +403,7 @@ golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181219222714-6e267b5cc78e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= @@ -386,16 +416,22 @@ golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190624190245-7f2218787638/go.mod 
h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.0.0-20181220000619-583d854617af/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.2.0/go.mod h1:IfRCZScioGtypHNTlz3gFk67J8uePVW7uDTBzXuIkhU= google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0 h1:9sdfJOzWlkqPltHAuzT2Cp+yrBeY1KRVYgms8soxMwM= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1 h1:QzqyMA1tlu6CgqCDUtU9V+ZKhLFT2dkJuANu5QaxI3I= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181219182458-5a97ab628bfb/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod 
h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -403,6 +439,9 @@ google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRn google.golang.org/genproto v0.0.0-20190626174449-989357319d63/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= google.golang.org/genproto v0.0.0-20190701230453-710ae3a149df h1:k3DT34vxk64+4bD5x+fRy6U0SXxZehzUHRSYUJcKfII= google.golang.org/genproto v0.0.0-20190701230453-710ae3a149df/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= +google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.15.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= +google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.19.1/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= @@ -435,6 +474,7 @@ gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20180920025451-e3ad64cb4ed3/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= From 04fa7d1e71f985ad6e47dc80581eb0d23f57bb51 Mon Sep 17 00:00:00 2001 
From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 12 Aug 2019 18:26:52 +0000 Subject: [PATCH 076/324] build(deps): bump gopkg.in/DataDog/dd-trace-go.v1 from 1.16.0 to 1.16.1 (#3114) Bumps [gopkg.in/DataDog/dd-trace-go.v1](https://github.com/DataDog/dd-trace-go) from 1.16.0 to 1.16.1. - [Release notes](https://github.com/DataDog/dd-trace-go/releases) - [Commits](https://github.com/DataDog/dd-trace-go/compare/v1.16.0...v1.16.1) Signed-off-by: dependabot-preview[bot] --- go.mod | 2 +- go.sum | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index d6b614a9d2e..854cacde2a5 100644 --- a/go.mod +++ b/go.mod @@ -58,7 +58,7 @@ require ( golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 // indirect google.golang.org/genproto v0.0.0-20190701230453-710ae3a149df // indirect google.golang.org/grpc v1.22.0 - gopkg.in/DataDog/dd-trace-go.v1 v1.16.0 + gopkg.in/DataDog/dd-trace-go.v1 v1.16.1 gopkg.in/inf.v0 v0.9.1 // indirect k8s.io/api v0.0.0-20190313235455-40a48860b5ab k8s.io/apimachinery v0.0.0-20190313205120-d7deff9243b1 diff --git a/go.sum b/go.sum index 612653c4ac6..6ff10c6cd1a 100644 --- a/go.sum +++ b/go.sum @@ -454,6 +454,8 @@ gopkg.in/DataDog/dd-trace-go.v1 v1.15.0 h1:2LhklnAJsRSelbnBrrE5QuRleRDkmOh2JWxOt gopkg.in/DataDog/dd-trace-go.v1 v1.15.0/go.mod h1:DVp8HmDh8PuTu2Z0fVVlBsyWaC++fzwVCaGWylTe3tg= gopkg.in/DataDog/dd-trace-go.v1 v1.16.0 h1:M+pIoj6uFByW3E8KaAaBmsLl3Sma3JzXZ9eo7ffyX5A= gopkg.in/DataDog/dd-trace-go.v1 v1.16.0/go.mod h1:DVp8HmDh8PuTu2Z0fVVlBsyWaC++fzwVCaGWylTe3tg= +gopkg.in/DataDog/dd-trace-go.v1 v1.16.1 h1:Dngw1zun6yTYFHNdzEWBlrJzFA2QJMjSA2sZ4nH2UWo= +gopkg.in/DataDog/dd-trace-go.v1 v1.16.1/go.mod h1:DVp8HmDh8PuTu2Z0fVVlBsyWaC++fzwVCaGWylTe3tg= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= From 555e4877ae07d52094a247e7dc167c229168a3db Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 12 Aug 2019 18:28:39 +0000 Subject: [PATCH 077/324] build(deps): bump github.com/aws/aws-sdk-go from 1.21.2 to 1.22.3 (#3116) Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.21.2 to 1.22.3. - [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Changelog](https://github.com/aws/aws-sdk-go/blob/master/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.21.2...v1.22.3) Signed-off-by: dependabot-preview[bot] --- go.mod | 2 +- go.sum | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index 854cacde2a5..ea184b66d0c 100644 --- a/go.mod +++ b/go.mod @@ -9,7 +9,7 @@ require ( github.com/Azure/go-autorest/autorest/azure/auth v0.1.0 github.com/Azure/go-autorest/autorest/to v0.2.0 // indirect github.com/Shopify/sarama v1.21.0 // indirect - github.com/aws/aws-sdk-go v1.21.2 + github.com/aws/aws-sdk-go v1.22.3 github.com/caddyserver/caddy v1.0.1 github.com/coreos/bbolt v1.3.2 // indirect github.com/coreos/etcd v3.3.13+incompatible diff --git a/go.sum b/go.sum index 6ff10c6cd1a..1b939a7934f 100644 --- a/go.sum +++ b/go.sum @@ -61,6 +61,8 @@ github.com/aws/aws-sdk-go v1.21.1 h1:IOFDnCEDybcw4V8nbKqyyjBu+vpu7hFYSfZqNuogi7I github.com/aws/aws-sdk-go v1.21.1/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.21.2 h1:CqbWrQzi7s8J2F0TRRdLvTr0+bt5Zxo2IDoFNGsAiUg= github.com/aws/aws-sdk-go v1.21.2/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.22.3 h1:6hh+6uqguPNPLtnb6rYLJnonwixttKMbvZYWVUdms98= 
+github.com/aws/aws-sdk-go v1.22.3/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= From a64ff8cc0a263f00c817612c2c4aca79328335ab Mon Sep 17 00:00:00 2001 From: Chris O'Haver Date: Mon, 12 Aug 2019 14:43:22 -0400 Subject: [PATCH 078/324] fix NXDOMAIN/NODATA fallthough case (#3118) --- plugin/kubernetes/handler.go | 4 +++- plugin/kubernetes/handler_test.go | 24 ++++++++++++++++++++++++ 2 files changed, 27 insertions(+), 1 deletion(-) diff --git a/plugin/kubernetes/handler.go b/plugin/kubernetes/handler.go index a7df99d9077..6ff8040a4f7 100644 --- a/plugin/kubernetes/handler.go +++ b/plugin/kubernetes/handler.go @@ -54,7 +54,9 @@ func (k Kubernetes) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.M fallthrough default: // Do a fake A lookup, so we can distinguish between NODATA and NXDOMAIN - _, err = plugin.A(ctx, &k, zone, state, nil, plugin.Options{}) + fake := state.NewWithQuestion(state.QName(), dns.TypeA) + fake.Zone = state.Zone + _, err = plugin.A(ctx, &k, zone, fake, nil, plugin.Options{}) } if k.IsNameError(err) { diff --git a/plugin/kubernetes/handler_test.go b/plugin/kubernetes/handler_test.go index a517e8b8919..a979386b971 100644 --- a/plugin/kubernetes/handler_test.go +++ b/plugin/kubernetes/handler_test.go @@ -349,6 +349,30 @@ var dnsTestCases = []test.Case{ test.SOA("cluster.local. 5 IN SOA ns.dns.cluster.local. hostmaster.cluster.local. 1499347823 7200 1800 86400 5"), }, }, + // NS query for qname != zone (existing domain) + { + Qname: "testns.svc.cluster.local.", Qtype: dns.TypeNS, + Rcode: dns.RcodeSuccess, + Ns: []dns.RR{ + test.SOA("cluster.local. 5 IN SOA ns.dns.cluster.local. hostmaster.cluster.local. 
1499347823 7200 1800 86400 5"), + }, + }, + // NS query for qname != zone (non existing domain) + { + Qname: "foo.cluster.local.", Qtype: dns.TypeNS, + Rcode: dns.RcodeNameError, + Ns: []dns.RR{ + test.SOA("cluster.local. 5 IN SOA ns.dns.cluster.local. hostmaster.cluster.local. 1499347823 7200 1800 86400 5"), + }, + }, + // NS query for qname != zone (non existing domain) + { + Qname: "foo.svc.cluster.local.", Qtype: dns.TypeNS, + Rcode: dns.RcodeNameError, + Ns: []dns.RR{ + test.SOA("cluster.local. 5 IN SOA ns.dns.cluster.local. hostmaster.cluster.local. 1499347823 7200 1800 86400 5"), + }, + }, } func TestServeDNS(t *testing.T) { From ca57dd35684d10d48d8c01586ddfc113cded671b Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Tue, 13 Aug 2019 13:56:21 +0000 Subject: [PATCH 079/324] deps: update (#3120) Just building in master updates the deps. Pushing this as a PR to get a clean build back. Signed-off-by: Miek Gieben --- go.mod | 6 ++++-- go.sum | 5 +++++ 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index ea184b66d0c..b4e452793db 100644 --- a/go.mod +++ b/go.mod @@ -4,6 +4,7 @@ go 1.12 require ( cloud.google.com/go v0.41.0 // indirect + contrib.go.opencensus.io/exporter/ocagent v0.4.12 // indirect github.com/Azure/azure-sdk-for-go v31.1.0+incompatible github.com/Azure/go-autorest/autorest v0.8.0 github.com/Azure/go-autorest/autorest/azure/auth v0.1.0 @@ -22,7 +23,6 @@ require ( github.com/gogo/protobuf v1.2.1 // indirect github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef // indirect github.com/golang/protobuf v1.3.2 - github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf // indirect github.com/googleapis/gnostic v0.2.0 // indirect github.com/gophercloud/gophercloud v0.0.0-20190307220656-fe1ba5ce12dd // indirect github.com/grpc-ecosystem/go-grpc-middleware v1.0.0 // indirect @@ -33,6 +33,8 @@ require ( 
github.com/konsorten/go-windows-terminal-sequences v1.0.2 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.1 github.com/miekg/dns v1.1.15 + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.1 // indirect github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492 // indirect github.com/opentracing/opentracing-go v1.1.0 github.com/openzipkin-contrib/zipkin-go-opentracing v0.3.5 @@ -54,7 +56,7 @@ require ( go.uber.org/zap v1.9.1 // indirect golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 // indirect golang.org/x/net v0.0.0-20190628185345-da137c7871d7 // indirect - golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb + golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 // indirect google.golang.org/genproto v0.0.0-20190701230453-710ae3a149df // indirect google.golang.org/grpc v1.22.0 diff --git a/go.sum b/go.sum index 1b939a7934f..e8ded047fb6 100644 --- a/go.sum +++ b/go.sum @@ -66,6 +66,7 @@ github.com/aws/aws-sdk-go v1.22.3/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bifurcation/mint v0.0.0-20180715133206-93c51c6ce115/go.mod h1:zVt7zX3K/aDCk9Tj+VM7YymsX66ERvzCJzw8rFCX2JU= github.com/caddyserver/caddy v1.0.1 h1:oor6ep+8NoJOabpFXhvjqjfeldtw1XSzfISVrbfqTKo= @@ -147,6 +148,7 @@ github.com/google/go-cmp v0.2.0/go.mod 
h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5a github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf h1:+RRA9JqSOZFfKrOeqr2z77+8R2RKyh8PG66dcu1V0ck= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= +github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= @@ -193,6 +195,7 @@ github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0 github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7 h1:KfgG9LzI+pYjr4xvmz/5H4FXjokeP+rlHLhv3iH62Fo= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= @@ -282,6 +285,7 @@ github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs= github.com/prometheus/procfs v0.0.2/go.mod 
h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.3 h1:CTwfnzjQ+8dS6MhHHu4YswVAD99sL2wjPqP+VkURmKE= github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhDYGoxY8uLVpewe1GDZ2vu2Tr/vTdVAkFQ= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= @@ -394,6 +398,7 @@ golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb h1:fgwFCsaw9buMuxNd6+DQfAuSFqbNiQZpcgJQAgJsK6k= golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3 h1:4y9KwBHBgBNwDbtu44R5o1fdOCQUEXhbk/P4A9WmJq0= golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= From ebc465d0dca8c98d44272e79acc1a43d63805520 Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Tue, 13 Aug 2019 15:02:29 +0000 Subject: [PATCH 080/324] plugin/route53: various updates (#3108) In the setup function use plugin.Error() to wrap the errors with the plugin name. Because there isn't a separate setup() function this is done for all returned errors. Remove *upstream.Upstream from the New parameters as this is always set and adjust the tests to account for this. 
Signed-off-by: Miek Gieben --- plugin/route53/route53.go | 12 ++++++------ plugin/route53/route53_test.go | 5 ++--- plugin/route53/setup.go | 25 +++++++++++-------------- 3 files changed, 19 insertions(+), 23 deletions(-) diff --git a/plugin/route53/route53.go b/plugin/route53/route53.go index 346defa66d6..644f7b43184 100644 --- a/plugin/route53/route53.go +++ b/plugin/route53/route53.go @@ -46,11 +46,11 @@ type zone struct { type zones map[string][]*zone // New reads from the keys map which uses domain names as its key and hosted -// zone id lists as its values, validates that each domain name/zone id pair does -// exist, and returns a new *Route53. In addition to this, upstream is passed -// for doing recursive queries against CNAMEs. -// Returns error if it cannot verify any given domain name/zone id pair. -func New(ctx context.Context, c route53iface.Route53API, keys map[string][]string, up *upstream.Upstream, refresh time.Duration) (*Route53, error) { +// zone id lists as its values, validates that each domain name/zone id pair +// does exist, and returns a new *Route53. In addition to this, upstream is use +// for doing recursive queries against CNAMEs. Returns error if it cannot +// verify any given domain name/zone id pair. 
+func New(ctx context.Context, c route53iface.Route53API, keys map[string][]string, refresh time.Duration) (*Route53, error) { zones := make(map[string][]*zone, len(keys)) zoneNames := make([]string, 0, len(keys)) for dns, hostedZoneIDs := range keys { @@ -72,7 +72,7 @@ func New(ctx context.Context, c route53iface.Route53API, keys map[string][]strin client: c, zoneNames: zoneNames, zones: zones, - upstream: up, + upstream: upstream.New(), refresh: refresh, }, nil } diff --git a/plugin/route53/route53_test.go b/plugin/route53/route53_test.go index 64ea90d6a0f..8db42d2520f 100644 --- a/plugin/route53/route53_test.go +++ b/plugin/route53/route53_test.go @@ -9,7 +9,6 @@ import ( "github.com/coredns/coredns/plugin/pkg/dnstest" "github.com/coredns/coredns/plugin/pkg/fall" - "github.com/coredns/coredns/plugin/pkg/upstream" "github.com/coredns/coredns/plugin/test" crequest "github.com/coredns/coredns/request" @@ -80,7 +79,7 @@ func (fakeRoute53) ListResourceRecordSetsPagesWithContext(_ aws.Context, in *rou func TestRoute53(t *testing.T) { ctx := context.Background() - r, err := New(ctx, fakeRoute53{}, map[string][]string{"bad.": {"0987654321"}}, &upstream.Upstream{}, time.Duration(1) * time.Minute) + r, err := New(ctx, fakeRoute53{}, map[string][]string{"bad.": {"0987654321"}}, time.Duration(1)*time.Minute) if err != nil { t.Fatalf("Failed to create Route53: %v", err) } @@ -88,7 +87,7 @@ func TestRoute53(t *testing.T) { t.Fatalf("Expected errors for zone bad.") } - r, err = New(ctx, fakeRoute53{}, map[string][]string{"org.": {"1357986420", "1234567890"}, "gov.": {"Z098765432", "1234567890"}}, &upstream.Upstream{}, time.Duration(90) * time.Second) + r, err = New(ctx, fakeRoute53{}, map[string][]string{"org.": {"1357986420", "1234567890"}, "gov.": {"Z098765432", "1234567890"}}, time.Duration(90)*time.Second) if err != nil { t.Fatalf("Failed to create Route53: %v", err) } diff --git a/plugin/route53/setup.go b/plugin/route53/setup.go 
index 918b0e56a29..23fb5e74ff8 100644 --- a/plugin/route53/setup.go +++ b/plugin/route53/setup.go @@ -11,7 +11,6 @@ import ( "github.com/coredns/coredns/plugin" "github.com/coredns/coredns/plugin/pkg/fall" clog "github.com/coredns/coredns/plugin/pkg/log" - "github.com/coredns/coredns/plugin/pkg/upstream" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/credentials" @@ -54,8 +53,6 @@ func setup(c *caddy.Controller, f func(*credentials.Credentials) route53iface.Ro var providers []credentials.Provider var fall fall.F - up := upstream.New() - refresh := time.Duration(1) * time.Minute // default update frequency to 1 minute args := c.RemainingArgs() @@ -63,14 +60,14 @@ func setup(c *caddy.Controller, f func(*credentials.Credentials) route53iface.Ro for i := 0; i < len(args); i++ { parts := strings.SplitN(args[i], ":", 2) if len(parts) != 2 { - return c.Errf("invalid zone '%s'", args[i]) + return plugin.Error("route53", c.Errf("invalid zone '%s'", args[i])) } dns, hostedZoneID := parts[0], parts[1] if dns == "" || hostedZoneID == "" { - return c.Errf("invalid zone '%s'", args[i]) + return plugin.Error("route53", c.Errf("invalid zone '%s'", args[i])) } if _, ok := keyPairs[args[i]]; ok { - return c.Errf("conflict zone '%s'", args[i]) + return plugin.Error("route53", c.Errf("conflict zone '%s'", args[i])) } keyPairs[args[i]] = struct{}{} @@ -82,7 +79,7 @@ func setup(c *caddy.Controller, f func(*credentials.Credentials) route53iface.Ro case "aws_access_key": v := c.RemainingArgs() if len(v) < 2 { - return c.Errf("invalid access key '%v'", v) + return plugin.Error("route53", c.Errf("invalid access key '%v'", v)) } providers = append(providers, &credentials.StaticProvider{ Value: credentials.Value{ @@ -112,16 +109,16 @@ func setup(c *caddy.Controller, f func(*credentials.Credentials) route53iface.Ro } refresh, err = time.ParseDuration(refreshStr) if err != nil { - return c.Errf("Unable to parse duration: '%v'", 
err) + return plugin.Error("route53", c.Errf("Unable to parse duration: '%v'", err)) } if refresh <= 0 { - return c.Errf("refresh interval must be greater than 0: %s", refreshStr) + return plugin.Error("route53", c.Errf("refresh interval must be greater than 0: %s", refreshStr)) } } else { - return c.ArgErr() + return plugin.Error("route53", c.ArgErr()) } default: - return c.Errf("unknown property '%s'", c.Val()) + return plugin.Error("route53", c.Errf("unknown property '%s'", c.Val())) } } providers = append(providers, &credentials.EnvProvider{}, sharedProvider, &ec2rolecreds.EC2RoleProvider{ @@ -129,13 +126,13 @@ func setup(c *caddy.Controller, f func(*credentials.Credentials) route53iface.Ro }) client := f(credentials.NewChainCredentials(providers)) ctx := context.Background() - h, err := New(ctx, client, keys, up, refresh) + h, err := New(ctx, client, keys, refresh) if err != nil { - return c.Errf("failed to create Route53 plugin: %v", err) + return plugin.Error("route53", c.Errf("failed to create Route53 plugin: %v", err)) } h.Fall = fall if err := h.Run(ctx); err != nil { - return c.Errf("failed to initialize Route53 plugin: %v", err) + return plugin.Error("route53", c.Errf("failed to initialize Route53 plugin: %v", err)) } dnsserver.GetConfig(c).AddPlugin(func(next plugin.Handler) plugin.Handler { h.Next = next From a23617b84b9d846a90df8c627abeb83cda0b0896 Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Tue, 13 Aug 2019 16:46:45 +0100 Subject: [PATCH 081/324] Add release notes for 1.6.2 Add notes for next release; azure was merged, nice to get that out, some other bugfixes. 
Signed-off-by: Miek Gieben --- notes/coredns-1.6.2.md | 38 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) create mode 100644 notes/coredns-1.6.2.md diff --git a/notes/coredns-1.6.2.md b/notes/coredns-1.6.2.md new file mode 100644 index 00000000000..b658e752ae3 --- /dev/null +++ b/notes/coredns-1.6.2.md @@ -0,0 +1,38 @@ ++++ +title = "CoreDNS-1.6.2 Release" +description = "CoreDNS-1.6.2 Release Notes." +tags = ["Release", "1.6.2", "Notes"] +release = "1.6.2" +date = 2019-08-13T14:35:47+01:00 +author = "coredns" ++++ + +The CoreDNS team has released +[CoreDNS-1.6.2](https://github.com/coredns/coredns/releases/tag/v1.6.2). + +This is a bug fix release, but it also features a new plugin called [*azure*](/plugins/azure). + +# Plugins + +* Add [*azure*](/plugins/azure) to facilitate serving records from Microsoft Azure. +* Make the refresh frequency adjustable in the [*route53*](/plugins/route53) plugin. +* Fix the handling of truncated responses in [*forward*](/plugins/forward). + +## Brought to You By + +Andrey Meshkov, +Chris O'Haver, +Darshan Chaudhary, +ethan, +Matt Kulka +and +Miek Gieben. 
+ +## Noteworthy Changes + +* plugin/azure: Add plugin for Azure DNS (https://github.com/coredns/coredns/pull/2945) +* plugin/forward: Fix handling truncated responses in forward (https://github.com/coredns/coredns/pull/3110) +* plugin/kubernetes: Don't do a zone transfer for NS requests (https://github.com/coredns/coredns/pull/3098) +* plugin/kubernetes: fix NXDOMAIN/NODATA fallthough case (https://github.com/coredns/coredns/pull/3118) +* plugin/route53: make refresh frequency adjustable (https://github.com/coredns/coredns/pull/3083) +* plugin/route53: Various updates (https://github.com/coredns/coredns/pull/3108) From 3c45e55f91d8773b76252ffdc7b7285d12d6a1e5 Mon Sep 17 00:00:00 2001 From: AllenZMC Date: Wed, 14 Aug 2019 00:47:23 +0800 Subject: [PATCH 082/324] Update parse.go (#3122) --- plugin/kubernetes/parse.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugin/kubernetes/parse.go b/plugin/kubernetes/parse.go index 60d2d340242..c16adc4cab3 100644 --- a/plugin/kubernetes/parse.go +++ b/plugin/kubernetes/parse.go @@ -78,7 +78,7 @@ func parseRequest(state request.Request) (r recordRequest, err error) { return r, nil } - // Because of ambiquity we check the labels left: 1: an endpoint. 2: port and protocol. + // Because of ambiguity we check the labels left: 1: an endpoint. 2: port and protocol. // Anything else is a query that is too long to answer and can safely be delegated to return an nxdomain. switch last { From fcbe70f4f89609d540f15f27fbf97cb3010771c5 Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Wed, 14 Aug 2019 07:56:12 +0000 Subject: [PATCH 083/324] run gofmt -w -s **/*.go (#3126) format and simplify; highlighted some stuff in route53_test.go that could be further simplified. 
Signed-off-by: Miek Gieben --- owners_generate.go | 2 +- plugin/route53/route53_test.go | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/owners_generate.go b/owners_generate.go index 162cf572292..90eebfc2373 100644 --- a/owners_generate.go +++ b/owners_generate.go @@ -43,7 +43,7 @@ func main() { if err != nil { log.Fatal(err) } - + // sort it and format it list := []string{} for k := range o { diff --git a/plugin/route53/route53_test.go b/plugin/route53/route53_test.go index 8db42d2520f..a4d71fbff48 100644 --- a/plugin/route53/route53_test.go +++ b/plugin/route53/route53_test.go @@ -79,7 +79,7 @@ func (fakeRoute53) ListResourceRecordSetsPagesWithContext(_ aws.Context, in *rou func TestRoute53(t *testing.T) { ctx := context.Background() - r, err := New(ctx, fakeRoute53{}, map[string][]string{"bad.": {"0987654321"}}, time.Duration(1)*time.Minute) + r, err := New(ctx, fakeRoute53{}, map[string][]string{"bad.": {"0987654321"}}, time.Minute) if err != nil { t.Fatalf("Failed to create Route53: %v", err) } @@ -87,7 +87,7 @@ func TestRoute53(t *testing.T) { t.Fatalf("Expected errors for zone bad.") } - r, err = New(ctx, fakeRoute53{}, map[string][]string{"org.": {"1357986420", "1234567890"}, "gov.": {"Z098765432", "1234567890"}}, time.Duration(90)*time.Second) + r, err = New(ctx, fakeRoute53{}, map[string][]string{"org.": {"1357986420", "1234567890"}, "gov.": {"Z098765432", "1234567890"}}, 90*time.Second) if err != nil { t.Fatalf("Failed to create Route53: %v", err) } From ea0d6abc859a4e23960aa814c2c557710a832bf8 Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Wed, 14 Aug 2019 08:10:50 +0000 Subject: [PATCH 084/324] Tweak the notes a bit (#3128) Say a thing about 1.12.8 and HTTP/2 Signed-off-by: Miek Gieben --- notes/coredns-1.6.2.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/notes/coredns-1.6.2.md b/notes/coredns-1.6.2.md index b658e752ae3..7d1d8ed5609 100644 --- a/notes/coredns-1.6.2.md +++ b/notes/coredns-1.6.2.md @@ -12,6 +12,9 @@ The 
CoreDNS team has released This is a bug fix release, but it also features a new plugin called [*azure*](/plugins/azure). +It's compiled with Go 1.12.8 that incorperates fixes for HTTP/2 that may impact you if you use +[DoH](https://tools.ietf.org/html/rfc8484). + # Plugins * Add [*azure*](/plugins/azure) to facilitate serving records from Microsoft Azure. From 795a3ebb649cc11e0744797314eee1f3f22277b6 Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Wed, 14 Aug 2019 08:22:30 +0000 Subject: [PATCH 085/324] up version to 1.6.2 (#3129) Signed-off-by: Miek Gieben --- coremain/version.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/coremain/version.go b/coremain/version.go index 11b7673451d..50ae6402155 100644 --- a/coremain/version.go +++ b/coremain/version.go @@ -2,7 +2,7 @@ package coremain // Various CoreDNS constants. const ( - CoreVersion = "1.6.1" + CoreVersion = "1.6.2" coreName = "CoreDNS" serverType = "dns" ) From 70b9cdb541cf4bd257f4a6f25a8059e7875496b6 Mon Sep 17 00:00:00 2001 From: AllenZMC Date: Wed, 14 Aug 2019 21:39:25 +0800 Subject: [PATCH 086/324] fix wrong spells in coredns-002.md (#3130) --- notes/coredns-002.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/notes/coredns-002.md b/notes/coredns-002.md index 0a326750729..660da854d24 100644 --- a/notes/coredns-002.md +++ b/notes/coredns-002.md @@ -77,7 +77,7 @@ CoreDNS is a DNS server that chains plugins, where each plugin implements a DNS * plugin/dnssec: replaced go-cache with golang-lru in dnssec. Also adds a `cache_capacity`. option in dnssec plugin so that the capacity of the LRU cache could be specified in the config file. -* plugin/logging: allow a response classs to be specified on log on responses matching the name *and* +* plugin/logging: allow a response class to be specified on log on responses matching the name *and* the response class. 
For instance only log denials for example.com: ~~~ corefile From d42f54467f1133a0a237fbbfb12e2ffa4c92323f Mon Sep 17 00:00:00 2001 From: Cricket Liu Date: Wed, 14 Aug 2019 10:48:15 -0700 Subject: [PATCH 087/324] Update README.md (#3131) Just minor textual cleanup. --- plugin/k8s_external/README.md | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/plugin/k8s_external/README.md b/plugin/k8s_external/README.md index f24c9a7719a..5960265236e 100644 --- a/plugin/k8s_external/README.md +++ b/plugin/k8s_external/README.md @@ -10,10 +10,10 @@ This plugin allows an additional zone to resolve the external IP address(es) of service. This plugin is only useful if the *kubernetes* plugin is also loaded. The plugin uses an external zone to resolve in-cluster IP addresses. It only handles queries for A, -AAAA and SRV records, all others result in NODATA responses. To make it a proper DNS zone it handles +AAAA and SRV records; all others result in NODATA responses. To make it a proper DNS zone, it handles SOA and NS queries for the apex of the zone. -By default the apex of the zone will look like (assuming the zone used is `example.org`): +By default the apex of the zone will look like the following (assuming the zone used is `example.org`): ~~~ dns example.org. 5 IN SOA ns1.dns.example.org. hostmaster.example.org. ( @@ -29,11 +29,11 @@ ns1.dns.example.org. 5 IN A .... ns1.dns.example.org. 5 IN AAAA .... ~~~ -Note we use the `dns` subdomain to place the records the DNS needs (see the `apex` directive). Also +Note that we use the `dns` subdomain for the records DNS needs (see the `apex` directive). Also note the SOA's serial number is static. The IP addresses of the nameserver records are those of the CoreDNS service. 
-The *k8s_external* plugin handles the subdomain `dns` and the apex of the zone by itself, all other +The *k8s_external* plugin handles the subdomain `dns` and the apex of the zone itself; all other queries are resolved to addresses in the cluster. ## Syntax @@ -44,7 +44,7 @@ k8s_external [ZONE...] * **ZONES** zones *k8s_external* should be authoritative for. -If you want to change the apex domain or use a different TTL for the return records you can use +If you want to change the apex domain or use a different TTL for the returned records you can use this extended syntax. ~~~ @@ -54,12 +54,12 @@ k8s_external [ZONE...] { } ~~~ -* **APEX** is the name (DNS label) to use the apex records, defaults to `dns`. +* **APEX** is the name (DNS label) to use for the apex records; it defaults to `dns`. * `ttl` allows you to set a custom **TTL** for responses. The default is 5 (seconds). # Examples -Enable names under `example.org` to be resolved to in cluster DNS addresses. +Enable names under `example.org` to be resolved to in-cluster DNS addresses. ~~~ . { @@ -68,7 +68,7 @@ Enable names under `example.org` to be resolved to in cluster DNS addresses. } ~~~ -With the Corefile above, the following Service will get an `A` record for `test.default.example.org` with IP address `192.168.200.123`. +With the Corefile above, the following Service will get an `A` record for `test.default.example.org` with the IP address `192.168.200.123`. 
~~~ apiVersion: v1 From fe60f103008aa29b03348237c55a6a88d74b498d Mon Sep 17 00:00:00 2001 From: AllenZMC Date: Thu, 15 Aug 2019 21:15:23 +0800 Subject: [PATCH 088/324] fix typos in kubernetes.go (#3132) --- plugin/kubernetes/kubernetes.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugin/kubernetes/kubernetes.go b/plugin/kubernetes/kubernetes.go index 1d9a7ec740c..fa3ef044671 100644 --- a/plugin/kubernetes/kubernetes.go +++ b/plugin/kubernetes/kubernetes.go @@ -68,7 +68,7 @@ const ( podModeDisabled = "disabled" // podModeVerified is where Pod requests are answered only if they exist podModeVerified = "verified" - // podModeInsecure is where pod requests are answered without verfying they exist + // podModeInsecure is where pod requests are answered without verifying they exist podModeInsecure = "insecure" // DNSSchemaVersion is the schema version: https://github.com/kubernetes/dns/blob/master/docs/specification.md DNSSchemaVersion = "1.0.1" From d36ca09c976cc96091b1c3e9f4b6e7a29a83ad15 Mon Sep 17 00:00:00 2001 From: AllenZMC Date: Fri, 16 Aug 2019 21:29:06 +0800 Subject: [PATCH 089/324] fix mis-spelling in object.go (#3134) --- plugin/kubernetes/object/object.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugin/kubernetes/object/object.go b/plugin/kubernetes/object/object.go index 3809aaaf468..6b1c7d83903 100644 --- a/plugin/kubernetes/object/object.go +++ b/plugin/kubernetes/object/object.go @@ -27,7 +27,7 @@ type ToFunc func(interface{}) interface{} // Empty is an empty struct. type Empty struct{} -// GetObjectKind implementss the ObjectKind interface as a noop. +// GetObjectKind implements the ObjectKind interface as a noop. func (e *Empty) GetObjectKind() schema.ObjectKind { return schema.EmptyObjectKind } // GetGenerateName implements the metav1.Object interface. 
From 86edb15c279029685786288cb433b730e50553fb Mon Sep 17 00:00:00 2001 From: zhangguoyan Date: Sat, 17 Aug 2019 00:34:12 +0800 Subject: [PATCH 090/324] plugin/forward: fix a typo (#3133) --- plugin/forward/forward.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugin/forward/forward.go b/plugin/forward/forward.go index 9d37780da20..da2b2beb49f 100644 --- a/plugin/forward/forward.go +++ b/plugin/forward/forward.go @@ -113,7 +113,7 @@ func (f *Forward) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg continue } // Retry with TCP if truncated and prefer_udp configured. - if ret != nil && ret.Truncated && !opts.forceTCP && f.opts.preferUDP { + if ret != nil && ret.Truncated && !opts.forceTCP && opts.preferUDP { opts.forceTCP = true continue } From bde393096f76dc02752f71f9deff5934353e4eb8 Mon Sep 17 00:00:00 2001 From: AllenZMC Date: Sat, 17 Aug 2019 23:29:46 +0800 Subject: [PATCH 091/324] fix wrong spells in zone.go (#3135) --- plugin/file/zone.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugin/file/zone.go b/plugin/file/zone.go index 9a12ca69f65..27c95177495 100644 --- a/plugin/file/zone.go +++ b/plugin/file/zone.go @@ -155,7 +155,7 @@ func (z *Zone) TransferAllowed(state request.Request) bool { } // All returns all records from the zone, the first record will be the SOA record, -// otionally followed by all RRSIG(SOA)s. +// optionally followed by all RRSIG(SOA)s. 
func (z *Zone) All() []dns.RR { records := []dns.RR{} z.RLock() From 194b0f95b459a593deb6c2d1e048e020070a841a Mon Sep 17 00:00:00 2001 From: Palash Nigam Date: Sun, 18 Aug 2019 02:29:09 +0530 Subject: [PATCH 092/324] Add Google Cloud DNS plugin (#3011) Signed-off-by: Palash Nigam Closes: #2822 --- .vscode/launch.json | 17 ++ core/dnsserver/zdirectives.go | 1 + core/plugin/zplugin.go | 1 + go.mod | 1 + go.sum | 1 + plugin.cfg | 4 + plugin/clouddns/README.md | 67 +++++++ plugin/clouddns/clouddns.go | 222 ++++++++++++++++++++++ plugin/clouddns/clouddns_test.go | 316 +++++++++++++++++++++++++++++++ plugin/clouddns/gcp.go | 32 ++++ plugin/clouddns/log_test.go | 5 + plugin/clouddns/setup.go | 110 +++++++++++ plugin/clouddns/setup_test.go | 48 +++++ 13 files changed, 825 insertions(+) create mode 100644 .vscode/launch.json create mode 100644 plugin/clouddns/README.md create mode 100644 plugin/clouddns/clouddns.go create mode 100644 plugin/clouddns/clouddns_test.go create mode 100644 plugin/clouddns/gcp.go create mode 100644 plugin/clouddns/log_test.go create mode 100644 plugin/clouddns/setup.go create mode 100644 plugin/clouddns/setup_test.go diff --git a/.vscode/launch.json b/.vscode/launch.json new file mode 100644 index 00000000000..c23774cdda5 --- /dev/null +++ b/.vscode/launch.json @@ -0,0 +1,17 @@ +{ + // Use IntelliSense to learn about possible attributes. + // Hover to view descriptions of existing attributes. 
+ // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 + "version": "0.2.0", + "configurations": [ + { + "name": "Launch", + "type": "go", + "request": "launch", + "mode": "auto", + "program": "${fileDirname}", + "env": {}, + "args": [] + } + ] +} \ No newline at end of file diff --git a/core/dnsserver/zdirectives.go b/core/dnsserver/zdirectives.go index b897cc04fa7..b48154448f0 100644 --- a/core/dnsserver/zdirectives.go +++ b/core/dnsserver/zdirectives.go @@ -37,6 +37,7 @@ var Directives = []string{ "hosts", "route53", "azure", + "clouddns", "federation", "k8s_external", "kubernetes", diff --git a/core/plugin/zplugin.go b/core/plugin/zplugin.go index 0c3b447ce1f..104a3e68afd 100644 --- a/core/plugin/zplugin.go +++ b/core/plugin/zplugin.go @@ -13,6 +13,7 @@ import ( _ "github.com/coredns/coredns/plugin/cache" _ "github.com/coredns/coredns/plugin/cancel" _ "github.com/coredns/coredns/plugin/chaos" + _ "github.com/coredns/coredns/plugin/clouddns" _ "github.com/coredns/coredns/plugin/debug" _ "github.com/coredns/coredns/plugin/dnssec" _ "github.com/coredns/coredns/plugin/dnstap" diff --git a/go.mod b/go.mod index b4e452793db..99828467b68 100644 --- a/go.mod +++ b/go.mod @@ -58,6 +58,7 @@ require ( golang.org/x/net v0.0.0-20190628185345-da137c7871d7 // indirect golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 // indirect + google.golang.org/api v0.7.0 google.golang.org/genproto v0.0.0-20190701230453-710ae3a149df // indirect google.golang.org/grpc v1.22.0 gopkg.in/DataDog/dd-trace-go.v1 v1.16.1 diff --git a/go.sum b/go.sum index e8ded047fb6..860fb298fa2 100644 --- a/go.sum +++ b/go.sum @@ -145,6 +145,7 @@ github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Z github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= github.com/google/btree v1.0.0/go.mod 
h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf h1:+RRA9JqSOZFfKrOeqr2z77+8R2RKyh8PG66dcu1V0ck= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= diff --git a/plugin.cfg b/plugin.cfg index 4d18c7d4e5f..34d9492532b 100644 --- a/plugin.cfg +++ b/plugin.cfg @@ -45,7 +45,11 @@ autopath:autopath template:template hosts:hosts route53:route53 +<<<<<<< 6a6e9a9b33731656b51655072951649d9e716613 azure:azure +======= +clouddns:clouddns +>>>>>>> Add Google Cloud DNS plugin federation:federation k8s_external:k8s_external kubernetes:kubernetes diff --git a/plugin/clouddns/README.md b/plugin/clouddns/README.md new file mode 100644 index 00000000000..4aa5f04fbfb --- /dev/null +++ b/plugin/clouddns/README.md @@ -0,0 +1,67 @@ +# clouddns + +## Name + +*clouddns* - enables serving zone data from GCP clouddns. + +## Description + +The clouddns plugin is useful for serving zones from resource record +sets in GCP clouddns. This plugin supports all [Google Cloud DNS records](https://cloud.google.com/dns/docs/overview#supported_dns_record_types). +The clouddns plugin can be used when coredns is deployed on GCP or elsewhere. + +## Syntax + +~~~ txt +clouddns [ZONE:PROJECT_NAME:HOSTED_ZONE_NAME...] { + credentials [FILENAME] + fallthrough [ZONES...] +} +~~~ + +* **ZONE** the name of the domain to be accessed. When there are multiple zones with overlapping + domains (private vs. public hosted zone), CoreDNS does the lookup in the given order here. + Therefore, for a non-existing resource record, SOA response will be from the rightmost zone. 
+ +* **HOSTED_ZONE_NAME** the name of the hosted zone that contains the resource record sets to be + accessed. + +* `credentials` is used for reading the credential file. + +* **FILENAME** GCP credentials file path. + +* `fallthrough` If zone matches and no record can be generated, pass request to the next plugin. + If **[ZONES...]** is omitted, then fallthrough happens for all zones for which the plugin is + authoritative. If specific zones are listed (for example `in-addr.arpa` and `ip6.arpa`), then + only queries for those zones will be subject to fallthrough. + +* **ZONES** zones it should be authoritative for. If empty, the zones from the configuration block + +## Examples + +Enable clouddns with implicit GCP credentials and resolve CNAMEs via 10.0.0.1: + +~~~ txt +. { + clouddns example.org.:gcp-example-project:example-zone + forward . 10.0.0.1 +} +~~~ + +Enable clouddns with fallthrough: + +~~~ txt +. { + clouddns example.org.:gcp-example-project:example-zone clouddns example.com.:gcp-example-project:example-zone-2 { + fallthrough example.gov. + } +} +~~~ + +Enable clouddns with multiple hosted zones with the same domain: + +~~~ txt +. { + clouddns example.org.:gcp-example-project:example-zone example.com.:gcp-example-project:other-example-zone +} +~~~ diff --git a/plugin/clouddns/clouddns.go b/plugin/clouddns/clouddns.go new file mode 100644 index 00000000000..ab04e5f75fd --- /dev/null +++ b/plugin/clouddns/clouddns.go @@ -0,0 +1,222 @@ +// Package clouddns implements a plugin that returns resource records +// from GCP Cloud DNS. 
+package clouddns + +import ( + "context" + "errors" + "fmt" + "strings" + "sync" + "time" + + "github.com/coredns/coredns/plugin" + "github.com/coredns/coredns/plugin/file" + "github.com/coredns/coredns/plugin/pkg/fall" + "github.com/coredns/coredns/plugin/pkg/upstream" + "github.com/coredns/coredns/request" + + "github.com/miekg/dns" + gcp "google.golang.org/api/dns/v1" +) + +// CloudDNS is a plugin that returns RR from GCP Cloud DNS. +type CloudDNS struct { + Next plugin.Handler + Fall fall.F + + zoneNames []string + client gcpDNS + upstream *upstream.Upstream + + zMu sync.RWMutex + zones zones +} + +type zone struct { + projectName string + zoneName string + z *file.Zone + dns string +} + +type zones map[string][]*zone + +// New reads from the keys map which uses domain names as its key and a colon separated +// string of project name and hosted zone name lists as its values, validates +// that each domain name/zone id pair does exist, and returns a new *CloudDNS. +// In addition to this, upstream is passed for doing recursive queries against CNAMEs. +// Returns error if it cannot verify any given domain name/zone id pair. 
+func New(ctx context.Context, c gcpDNS, keys map[string][]string, up *upstream.Upstream) (*CloudDNS, error) { + zones := make(map[string][]*zone, len(keys)) + zoneNames := make([]string, 0, len(keys)) + for dnsName, hostedZoneDetails := range keys { + for _, hostedZone := range hostedZoneDetails { + ss := strings.SplitN(hostedZone, ":", 2) + if len(ss) != 2 { + return nil, errors.New("either project or zone name missing") + } + err := c.zoneExists(ss[0], ss[1]) + if err != nil { + return nil, err + } + fqdnDNSName := dns.Fqdn(dnsName) + if _, ok := zones[fqdnDNSName]; !ok { + zoneNames = append(zoneNames, fqdnDNSName) + } + zones[fqdnDNSName] = append(zones[fqdnDNSName], &zone{projectName: ss[0], zoneName: ss[1], dns: fqdnDNSName, z: file.NewZone(fqdnDNSName, "")}) + } + } + return &CloudDNS{ + client: c, + zoneNames: zoneNames, + zones: zones, + upstream: up, + }, nil +} + +// Run executes first update, spins up an update forever-loop. +// Returns error if first update fails. +func (h *CloudDNS) Run(ctx context.Context) error { + if err := h.updateZones(ctx); err != nil { + return err + } + go func() { + for { + select { + case <-ctx.Done(): + log.Infof("Breaking out of CloudDNS update loop: %v", ctx.Err()) + return + case <-time.After(1 * time.Minute): + if err := h.updateZones(ctx); err != nil && ctx.Err() == nil /* Don't log error if ctx expired. */ { + log.Errorf("Failed to update zones: %v", err) + } + } + } + }() + return nil +} + +// ServeDNS implements the plugin.Handler interface. 
+func (h *CloudDNS) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (int, error) { + state := request.Request{W: w, Req: r} + qname := state.Name() + + zName := plugin.Zones(h.zoneNames).Matches(qname) + if zName == "" { + return plugin.NextOrFailure(h.Name(), h.Next, ctx, w, r) + } + + z, ok := h.zones[zName] // ok true if we are authoritive for the zone + if !ok || z == nil { + return dns.RcodeServerFailure, nil + } + + m := new(dns.Msg) + m.SetReply(r) + m.Authoritative = true + var result file.Result + + for _, hostedZone := range z { + h.zMu.RLock() + m.Answer, m.Ns, m.Extra, result = hostedZone.z.Lookup(ctx, state, qname) + h.zMu.RUnlock() + + // Take the answer if it's non-empty OR if there is another + // record type exists for this name (NODATA). + if len(m.Answer) != 0 || result == file.NoData { + break + } + } + + if len(m.Answer) == 0 && result != file.NoData && h.Fall.Through(qname) { + return plugin.NextOrFailure(h.Name(), h.Next, ctx, w, r) + } + + switch result { + case file.Success: + case file.NoData: + case file.NameError: + m.Rcode = dns.RcodeNameError + case file.Delegation: + m.Authoritative = false + case file.ServerFailure: + return dns.RcodeServerFailure, nil + } + + w.WriteMsg(m) + return dns.RcodeSuccess, nil +} + +func updateZoneFromRRS(rrs *gcp.ResourceRecordSetsListResponse, z *file.Zone) error { + for _, rr := range rrs.Rrsets { + var rfc1035 string + var r dns.RR + var err error + for _, value := range rr.Rrdatas { + if rr.Type == "CNAME" || rr.Type == "PTR" { + value = dns.Fqdn(value) + } + + // Assemble RFC 1035 conforming record to pass into dns scanner. + rfc1035 = fmt.Sprintf("%s %d IN %s %s", dns.Fqdn(rr.Name), rr.Ttl, rr.Type, value) + r, err = dns.NewRR(rfc1035) + if err != nil { + return fmt.Errorf("failed to parse resource record: %v", err) + } + } + + z.Insert(r) + } + return nil +} + +// updateZones re-queries resource record sets for each zone and updates the +// zone object. 
+// Returns error if any zones error'ed out, but waits for other zones to +// complete first. +func (h *CloudDNS) updateZones(ctx context.Context) error { + errc := make(chan error) + defer close(errc) + for zName, z := range h.zones { + go func(zName string, z []*zone) { + var err error + var rrListResponse *gcp.ResourceRecordSetsListResponse + defer func() { + errc <- err + }() + + for i, hostedZone := range z { + newZ := file.NewZone(zName, "") + newZ.Upstream = h.upstream + rrListResponse, err = h.client.listRRSets(hostedZone.projectName, hostedZone.zoneName) + if err != nil { + err = fmt.Errorf("failed to list resource records for %v:%v:%v from gcp: %v", zName, hostedZone.projectName, hostedZone.zoneName, err) + return + } + updateZoneFromRRS(rrListResponse, newZ) + + h.zMu.Lock() + (*z[i]).z = newZ + h.zMu.Unlock() + } + + }(zName, z) + } + // Collect errors (if any). This will also sync on all zones updates + // completion. + var errs []string + for i := 0; i < len(h.zones); i++ { + err := <-errc + if err != nil { + errs = append(errs, err.Error()) + } + } + if len(errs) != 0 { + return fmt.Errorf("errors updating zones: %v", errs) + } + return nil +} + +// Name implements the Handler interface. 
+func (h *CloudDNS) Name() string { return "clouddns" } diff --git a/plugin/clouddns/clouddns_test.go b/plugin/clouddns/clouddns_test.go new file mode 100644 index 00000000000..dafd65bba56 --- /dev/null +++ b/plugin/clouddns/clouddns_test.go @@ -0,0 +1,316 @@ +package clouddns + +import ( + "context" + "errors" + "reflect" + "testing" + + "github.com/coredns/coredns/plugin/pkg/dnstest" + "github.com/coredns/coredns/plugin/pkg/fall" + "github.com/coredns/coredns/plugin/pkg/upstream" + "github.com/coredns/coredns/plugin/test" + crequest "github.com/coredns/coredns/request" + + "github.com/miekg/dns" + gcp "google.golang.org/api/dns/v1" +) + +type fakeGCPClient struct { + *gcp.Service +} + +func (c fakeGCPClient) zoneExists(projectName, hostedZoneName string) error { + return nil +} + +func (c fakeGCPClient) listRRSets(projectName, hostedZoneName string) (*gcp.ResourceRecordSetsListResponse, error) { + if projectName == "bad-project" || hostedZoneName == "bad-zone" { + return nil, errors.New("the 'parameters.managedZone' resource named 'bad-zone' does not exist") + } + + var rr []*gcp.ResourceRecordSet + + if hostedZoneName == "sample-zone-1" { + rr = []*gcp.ResourceRecordSet{ + { + Name: "example.org.", + Ttl: 300, + Type: "A", + Rrdatas: []string{"1.2.3.4"}, + }, + { + Name: "www.example.org", + Ttl: 300, + Type: "A", + Rrdatas: []string{"1.2.3.4"}, + }, + { + Name: "*.www.example.org", + Ttl: 300, + Type: "CNAME", + Rrdatas: []string{"www.example.org"}, + }, + { + Name: "example.org.", + Ttl: 300, + Type: "AAAA", + Rrdatas: []string{"2001:db8:85a3::8a2e:370:7334"}, + }, + { + Name: "sample.example.org", + Ttl: 300, + Type: "CNAME", + Rrdatas: []string{"example.org"}, + }, + { + Name: "example.org.", + Ttl: 300, + Type: "PTR", + Rrdatas: []string{"ptr.example.org."}, + }, + { + Name: "org.", + Ttl: 300, + Type: "SOA", + Rrdatas: []string{"ns-cloud-c1.googledomains.com. cloud-dns-hostmaster.google.com. 
1 21600 300 259200 300"}, + }, + { + Name: "com.", + Ttl: 300, + Type: "NS", + Rrdatas: []string{"ns-cloud-c4.googledomains.com."}, + }, + { + Name: "split-example.gov.", + Ttl: 300, + Type: "A", + Rrdatas: []string{"1.2.3.4"}, + }, + { + Name: "swag.", + Ttl: 300, + Type: "YOLO", + Rrdatas: []string{"foobar"}, + }, + } + } else { + rr = []*gcp.ResourceRecordSet{ + { + Name: "split-example.org.", + Ttl: 300, + Type: "A", + Rrdatas: []string{"1.2.3.4"}, + }, + { + Name: "other-example.org.", + Ttl: 300, + Type: "A", + Rrdatas: []string{"3.5.7.9"}, + }, + { + Name: "org.", + Ttl: 300, + Type: "SOA", + Rrdatas: []string{"ns-cloud-e1.googledomains.com. cloud-dns-hostmaster.google.com. 1 21600 300 259200 300"}, + }, + } + } + + return &gcp.ResourceRecordSetsListResponse{Rrsets: rr}, nil +} + +func TestCloudDNS(t *testing.T) { + ctx := context.Background() + + r, err := New(ctx, fakeGCPClient{}, map[string][]string{"bad.": {"bad-project:bad-zone"}}, &upstream.Upstream{}) + if err != nil { + t.Fatalf("Failed to create Cloud DNS: %v", err) + } + if err = r.Run(ctx); err == nil { + t.Fatalf("Expected errors for zone bad.") + } + + r, err = New(ctx, fakeGCPClient{}, map[string][]string{"org.": {"sample-project-1:sample-zone-2", "sample-project-1:sample-zone-1"}, "gov.": {"sample-project-1:sample-zone-2", "sample-project-1:sample-zone-1"}}, &upstream.Upstream{}) + if err != nil { + t.Fatalf("Failed to create Cloud DNS: %v", err) + } + r.Fall = fall.Zero + r.Fall.SetZonesFromArgs([]string{"gov."}) + r.Next = test.HandlerFunc(func(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (int, error) { + state := crequest.Request{W: w, Req: r} + qname := state.Name() + m := new(dns.Msg) + rcode := dns.RcodeServerFailure + if qname == "example.gov." { + m.SetReply(r) + rr, err := dns.NewRR("example.gov. 
300 IN A 2.4.6.8") + if err != nil { + t.Fatalf("Failed to create Resource Record: %v", err) + } + m.Answer = []dns.RR{rr} + + m.Authoritative = true + rcode = dns.RcodeSuccess + + } + + m.SetRcode(r, rcode) + w.WriteMsg(m) + return rcode, nil + }) + err = r.Run(ctx) + if err != nil { + t.Fatalf("Failed to initialize Cloud DNS: %v", err) + } + + tests := []struct { + qname string + qtype uint16 + wantRetCode int + wantAnswer []string // ownernames for the records in the additional section. + wantMsgRCode int + wantNS []string + expectedErr error + }{ + // 0. example.org A found - success. + { + qname: "example.org", + qtype: dns.TypeA, + wantAnswer: []string{"example.org. 300 IN A 1.2.3.4"}, + }, + // 1. example.org AAAA found - success. + { + qname: "example.org", + qtype: dns.TypeAAAA, + wantAnswer: []string{"example.org. 300 IN AAAA 2001:db8:85a3::8a2e:370:7334"}, + }, + // 2. exampled.org PTR found - success. + { + qname: "example.org", + qtype: dns.TypePTR, + wantAnswer: []string{"example.org. 300 IN PTR ptr.example.org."}, + }, + // 3. sample.example.org points to example.org CNAME. + // Query must return both CNAME and A recs. + { + qname: "sample.example.org", + qtype: dns.TypeA, + wantAnswer: []string{ + "sample.example.org. 300 IN CNAME example.org.", + "example.org. 300 IN A 1.2.3.4", + }, + }, + // 4. Explicit CNAME query for sample.example.org. + // Query must return just CNAME. + { + qname: "sample.example.org", + qtype: dns.TypeCNAME, + wantAnswer: []string{"sample.example.org. 300 IN CNAME example.org."}, + }, + // 5. Explicit SOA query for example.org. + { + qname: "example.org", + qtype: dns.TypeSOA, + wantAnswer: []string{"org. 300 IN SOA ns-cloud-e1.googledomains.com. cloud-dns-hostmaster.google.com. 1 21600 300 259200 300"}, + }, + // 6. Explicit SOA query for example.org. + { + qname: "example.org", + qtype: dns.TypeNS, + wantNS: []string{"org. 300 IN SOA ns-cloud-c1.googledomains.com. cloud-dns-hostmaster.google.com. 
1 21600 300 259200 300"}, + }, + // 7. AAAA query for split-example.org must return NODATA. + { + qname: "split-example.gov", + qtype: dns.TypeAAAA, + wantRetCode: dns.RcodeSuccess, + wantNS: []string{"org. 300 IN SOA ns-cloud-c1.googledomains.com. cloud-dns-hostmaster.google.com. 1 21600 300 259200 300"}, + }, + // 8. Zone not configured. + { + qname: "badexample.com", + qtype: dns.TypeA, + wantRetCode: dns.RcodeServerFailure, + wantMsgRCode: dns.RcodeServerFailure, + }, + // 9. No record found. Return SOA record. + { + qname: "bad.org", + qtype: dns.TypeA, + wantRetCode: dns.RcodeSuccess, + wantMsgRCode: dns.RcodeNameError, + wantNS: []string{"org. 300 IN SOA ns-cloud-c1.googledomains.com. cloud-dns-hostmaster.google.com. 1 21600 300 259200 300"}, + }, + // 10. No record found. Fallthrough. + { + qname: "example.gov", + qtype: dns.TypeA, + wantAnswer: []string{"example.gov. 300 IN A 2.4.6.8"}, + }, + // 11. other-zone.example.org is stored in a different hosted zone. success + { + qname: "other-example.org", + qtype: dns.TypeA, + wantAnswer: []string{"other-example.org. 300 IN A 3.5.7.9"}, + }, + // 12. split-example.org only has A record. Expect NODATA. + { + qname: "split-example.org", + qtype: dns.TypeAAAA, + wantNS: []string{"org. 300 IN SOA ns-cloud-e1.googledomains.com. cloud-dns-hostmaster.google.com. 1 21600 300 259200 300"}, + }, + // 13. *.www.example.org is a wildcard CNAME to www.example.org. + { + qname: "a.www.example.org", + qtype: dns.TypeA, + wantAnswer: []string{ + "a.www.example.org. 300 IN CNAME www.example.org.", + "www.example.org. 
300 IN A 1.2.3.4", + }, + }, + } + + for ti, tc := range tests { + req := new(dns.Msg) + req.SetQuestion(dns.Fqdn(tc.qname), tc.qtype) + + rec := dnstest.NewRecorder(&test.ResponseWriter{}) + code, err := r.ServeDNS(ctx, rec, req) + + if err != tc.expectedErr { + t.Fatalf("Test %d: Expected error %v, but got %v", ti, tc.expectedErr, err) + } + if code != int(tc.wantRetCode) { + t.Fatalf("Test %d: Expected returned status code %s, but got %s", ti, dns.RcodeToString[tc.wantRetCode], dns.RcodeToString[code]) + } + + if tc.wantMsgRCode != rec.Msg.Rcode { + t.Errorf("Test %d: Unexpected msg status code. Want: %s, got: %s", ti, dns.RcodeToString[tc.wantMsgRCode], dns.RcodeToString[rec.Msg.Rcode]) + } + + if len(tc.wantAnswer) != len(rec.Msg.Answer) { + t.Errorf("Test %d: Unexpected number of Answers. Want: %d, got: %d", ti, len(tc.wantAnswer), len(rec.Msg.Answer)) + } else { + for i, gotAnswer := range rec.Msg.Answer { + if gotAnswer.String() != tc.wantAnswer[i] { + t.Errorf("Test %d: Unexpected answer.\nWant:\n\t%s\nGot:\n\t%s", ti, tc.wantAnswer[i], gotAnswer) + } + } + } + + if len(tc.wantNS) != len(rec.Msg.Ns) { + t.Errorf("Test %d: Unexpected NS number. Want: %d, got: %d", ti, len(tc.wantNS), len(rec.Msg.Ns)) + } else { + for i, ns := range rec.Msg.Ns { + got, ok := ns.(*dns.SOA) + if !ok { + t.Errorf("Test %d: Unexpected NS type. 
Want: SOA, got: %v", ti, reflect.TypeOf(got)) + } + if got.String() != tc.wantNS[i] { + t.Errorf("Test %d: Unexpected NS.\nWant: %v\nGot: %v", ti, tc.wantNS[i], got) + } + } + } + } +} diff --git a/plugin/clouddns/gcp.go b/plugin/clouddns/gcp.go new file mode 100644 index 00000000000..6d9d85d432c --- /dev/null +++ b/plugin/clouddns/gcp.go @@ -0,0 +1,32 @@ +package clouddns + +import gcp "google.golang.org/api/dns/v1" + +type gcpDNS interface { + zoneExists(projectName, hostedZoneName string) error + listRRSets(projectName, hostedZoneName string) (*gcp.ResourceRecordSetsListResponse, error) +} + +type gcpClient struct { + *gcp.Service +} + +// zoneExists is a wrapper method around `gcp.Service.ManagedZones.Get` +// it checks if the provided zone name for a given project exists. +func (c gcpClient) zoneExists(projectName, hostedZoneName string) error { + _, err := c.ManagedZones.Get(projectName, hostedZoneName).Do() + if err != nil { + return err + } + return nil +} + +// listRRSets is a wrapper method around `gcp.Service.ResourceRecordSets.List` +// it fetches and returns the record sets for a hosted zone. 
+func (c gcpClient) listRRSets(projectName, hostedZoneName string) (*gcp.ResourceRecordSetsListResponse, error) { + rr, err := c.ResourceRecordSets.List(projectName, hostedZoneName).Do() + if err != nil { + return nil, err + } + return rr, nil +} diff --git a/plugin/clouddns/log_test.go b/plugin/clouddns/log_test.go new file mode 100644 index 00000000000..148635b4be8 --- /dev/null +++ b/plugin/clouddns/log_test.go @@ -0,0 +1,5 @@ +package clouddns + +import clog "github.com/coredns/coredns/plugin/pkg/log" + +func init() { clog.Discard() } diff --git a/plugin/clouddns/setup.go b/plugin/clouddns/setup.go new file mode 100644 index 00000000000..732c240f77f --- /dev/null +++ b/plugin/clouddns/setup.go @@ -0,0 +1,110 @@ +package clouddns + +import ( + "context" + "strings" + + "github.com/coredns/coredns/core/dnsserver" + "github.com/coredns/coredns/plugin" + "github.com/coredns/coredns/plugin/pkg/fall" + clog "github.com/coredns/coredns/plugin/pkg/log" + "github.com/coredns/coredns/plugin/pkg/upstream" + + "github.com/caddyserver/caddy" + gcp "google.golang.org/api/dns/v1" + "google.golang.org/api/option" +) + +var log = clog.NewWithPlugin("clouddns") + +func init() { + caddy.RegisterPlugin("clouddns", caddy.Plugin{ + ServerType: "dns", + Action: func(c *caddy.Controller) error { + f := func(ctx context.Context, opt option.ClientOption) (gcpDNS, error) { + var err error + var client *gcp.Service + if opt != nil { + client, err = gcp.NewService(ctx, opt) + } else { + // if credentials file is not provided in the Corefile + // authenticate the client using env variables + client, err = gcp.NewService(ctx) + } + return gcpClient{client}, err + } + return setup(c, f) + }, + }) +} + +func setup(c *caddy.Controller, f func(ctx context.Context, opt option.ClientOption) (gcpDNS, error)) error { + for c.Next() { + keyPairs := map[string]struct{}{} + keys := map[string][]string{} + + var fall fall.F + up := upstream.New() + 
+ args := c.RemainingArgs() + + for i := 0; i < len(args); i++ { + parts := strings.SplitN(args[i], ":", 3) + if len(parts) != 3 { + return c.Errf("invalid zone '%s'", args[i]) + } + dnsName, projectName, hostedZone := parts[0], parts[1], parts[2] + if dnsName == "" || projectName == "" || hostedZone == "" { + return c.Errf("invalid zone '%s'", args[i]) + } + if _, ok := keyPairs[args[i]]; ok { + return c.Errf("conflict zone '%s'", args[i]) + } + + keyPairs[args[i]] = struct{}{} + keys[dnsName] = append(keys[dnsName], projectName+":"+hostedZone) + } + + var opt option.ClientOption + for c.NextBlock() { + switch c.Val() { + case "upstream": + c.RemainingArgs() // eats args + // if filepath is provided in the Corefile use it to authenticate the dns client + case "credentials": + if c.NextArg() { + opt = option.WithCredentialsFile(c.Val()) + } else { + return c.ArgErr() + } + case "fallthrough": + fall.SetZonesFromArgs(c.RemainingArgs()) + default: + return c.Errf("unknown property '%s'", c.Val()) + } + } + + ctx := context.Background() + client, err := f(ctx, opt) + if err != nil { + return err + } + + h, err := New(ctx, client, keys, up) + if err != nil { + return c.Errf("failed to create Cloud DNS plugin: %v", err) + } + h.Fall = fall + + if err := h.Run(ctx); err != nil { + return c.Errf("failed to initialize Cloud DNS plugin: %v", err) + } + + dnsserver.GetConfig(c).AddPlugin(func(next plugin.Handler) plugin.Handler { + h.Next = next + return h + }) + } + + return nil +} diff --git a/plugin/clouddns/setup_test.go b/plugin/clouddns/setup_test.go new file mode 100644 index 00000000000..be9c51d92f4 --- /dev/null +++ b/plugin/clouddns/setup_test.go @@ -0,0 +1,48 @@ +package clouddns + +import ( + "context" + "testing" + + "github.com/caddyserver/caddy" + "google.golang.org/api/option" +) + +func TestSetupCloudDNS(t *testing.T) { + f := func(ctx context.Context, opt option.ClientOption) (gcpDNS, error) { + return fakeGCPClient{}, nil + } + + tests := []struct 
{ + body string + expectedError bool + }{ + {`clouddns`, false}, + {`clouddns :`, true}, + {`clouddns ::`, true}, + {`clouddns example.org.:example-project:zone-name`, false}, + {`clouddns example.org.:example-project:zone-name { }`, false}, + {`clouddns example.org.:example-project: { }`, true}, + {`clouddns example.org.:example-project:zone-name { }`, false}, + {`clouddns example.org.:example-project:zone-name { wat +}`, true}, + {`clouddns example.org.:example-project:zone-name { + fallthrough +}`, false}, + {`clouddns example.org.:example-project:zone-name { + credentials +}`, true}, + {`clouddns example.org.:example-project:zone-name example.org.:example-project:zone-name { + }`, true}, + + {`clouddns example.org { + }`, true}, + } + + for _, test := range tests { + c := caddy.NewTestController("dns", test.body) + if err := setup(c, f); (err == nil) == test.expectedError { + t.Errorf("Unexpected errors: %v", err) + } + } +} From ef7efae761238afeddcef4ea102f4672793b2a61 Mon Sep 17 00:00:00 2001 From: Yong Tang Date: Sat, 17 Aug 2019 14:07:23 -0700 Subject: [PATCH 093/324] Remove merge left overs (#3136) Looks like there were some leftover in PR 3011 during the merge conflict rebase. This PR removes the unneeded file and fixed the plugin.cfg Signed-off-by: Yong Tang --- .vscode/launch.json | 17 ----------------- plugin.cfg | 3 --- 2 files changed, 20 deletions(-) delete mode 100644 .vscode/launch.json diff --git a/.vscode/launch.json b/.vscode/launch.json deleted file mode 100644 index c23774cdda5..00000000000 --- a/.vscode/launch.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - // Use IntelliSense to learn about possible attributes. - // Hover to view descriptions of existing attributes. 
- // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 - "version": "0.2.0", - "configurations": [ - { - "name": "Launch", - "type": "go", - "request": "launch", - "mode": "auto", - "program": "${fileDirname}", - "env": {}, - "args": [] - } - ] -} \ No newline at end of file diff --git a/plugin.cfg b/plugin.cfg index 34d9492532b..be944753412 100644 --- a/plugin.cfg +++ b/plugin.cfg @@ -45,11 +45,8 @@ autopath:autopath template:template hosts:hosts route53:route53 -<<<<<<< 6a6e9a9b33731656b51655072951649d9e716613 azure:azure -======= clouddns:clouddns ->>>>>>> Add Google Cloud DNS plugin federation:federation k8s_external:k8s_external kubernetes:kubernetes From b53d822e62f36c1cdd693b67d172efd1d7c60cdc Mon Sep 17 00:00:00 2001 From: Yong Tang Date: Sat, 17 Aug 2019 14:46:48 -0700 Subject: [PATCH 094/324] Additional content in clouddns plugin, and format of markdown (#3138) This PR adds some content in clouddns plugin to mention about the fact that privately hosted zone does not need to attach to a VPC. Also change PROJECT_NAME to PROJECTT_ID, and reformt the markdown to replace tab with 4 whitespace. Signed-off-by: Yong Tang --- plugin/clouddns/README.md | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/plugin/clouddns/README.md b/plugin/clouddns/README.md index 4aa5f04fbfb..94156ea0815 100644 --- a/plugin/clouddns/README.md +++ b/plugin/clouddns/README.md @@ -6,14 +6,18 @@ ## Description -The clouddns plugin is useful for serving zones from resource record -sets in GCP clouddns. This plugin supports all [Google Cloud DNS records](https://cloud.google.com/dns/docs/overview#supported_dns_record_types). -The clouddns plugin can be used when coredns is deployed on GCP or elsewhere. +The clouddns plugin is useful for serving zones from resource record sets in GCP clouddns. +This plugin supports all [Google Cloud DNS records](https://cloud.google.com/dns/docs/overview#supported_dns_record_types). 
+The clouddns plugin can be used when coredns is deployed on GCP or elsewhere. Note that +this plugin access the the resource records through Google Cloud API. For records in a +privately hosted zone, it is not necessary to place CoreDNS and this plugin in associated +VPC network. In fact the private hosted zone could be created without any associated VPC +and this plugin could still access the resource records under the hosted zone. ## Syntax ~~~ txt -clouddns [ZONE:PROJECT_NAME:HOSTED_ZONE_NAME...] { +clouddns [ZONE:PROJECT_ID:HOSTED_ZONE_NAME...] { credentials [FILENAME] fallthrough [ZONES...] } @@ -23,12 +27,14 @@ clouddns [ZONE:PROJECT_NAME:HOSTED_ZONE_NAME...] { domains (private vs. public hosted zone), CoreDNS does the lookup in the given order here. Therefore, for a non-existing resource record, SOA response will be from the rightmost zone. +* **PROJECT_ID** the project ID of the Google Cloud project. + * **HOSTED_ZONE_NAME** the name of the hosted zone that contains the resource record sets to be accessed. * `credentials` is used for reading the credential file. -* **FILENAME** GCP credentials file path. +* **FILENAME** GCP credentials file path (normally a .json file). * `fallthrough` If zone matches and no record can be generated, pass request to the next plugin. If **[ZONES...]** is omitted, then fallthrough happens for all zones for which the plugin is @@ -43,7 +49,7 @@ Enable clouddns with implicit GCP credentials and resolve CNAMEs via 10.0.0.1: ~~~ txt . { - clouddns example.org.:gcp-example-project:example-zone + clouddns example.org.:gcp-example-project:example-zone forward . 10.0.0.1 } ~~~ @@ -53,7 +59,7 @@ Enable clouddns with fallthrough: ~~~ txt . { clouddns example.org.:gcp-example-project:example-zone clouddns example.com.:gcp-example-project:example-zone-2 { - fallthrough example.gov. + fallthrough example.gov. 
} } ~~~ From bbc78abf6f24e6b42105cca2cc3989517ff8b952 Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Sun, 18 Aug 2019 08:40:34 +0000 Subject: [PATCH 095/324] plugin/clouddns: tiny fixes for the README (#3140) * plugin/clouddns: tiny fixes for the README Did a post-merge review. Fix a few typos. Signed-off-by: Miek Gieben * Naming; coredns -> CoreDNS clouddns -> Cloud DNS and italics then the plugin's name are mentioned. Signed-off-by: Miek Gieben --- plugin/clouddns/README.md | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/plugin/clouddns/README.md b/plugin/clouddns/README.md index 94156ea0815..dcb9aafa8ee 100644 --- a/plugin/clouddns/README.md +++ b/plugin/clouddns/README.md @@ -2,17 +2,18 @@ ## Name -*clouddns* - enables serving zone data from GCP clouddns. +*clouddns* - enables serving zone data from GCP Cloud DNS. ## Description -The clouddns plugin is useful for serving zones from resource record sets in GCP clouddns. -This plugin supports all [Google Cloud DNS records](https://cloud.google.com/dns/docs/overview#supported_dns_record_types). -The clouddns plugin can be used when coredns is deployed on GCP or elsewhere. Note that -this plugin access the the resource records through Google Cloud API. For records in a -privately hosted zone, it is not necessary to place CoreDNS and this plugin in associated -VPC network. In fact the private hosted zone could be created without any associated VPC -and this plugin could still access the resource records under the hosted zone. +The *clouddns* plugin is useful for serving zones from resource record +sets in GCP Cloud DNS. This plugin supports all [Google Cloud DNS +records](https://cloud.google.com/dns/docs/overview#supported_dns_record_types). This plugin can +be used when CoreDNS is deployed on GCP or elsewhere. Note that this plugin accesses the resource +records through the Google Cloud API. 
For records in a privately hosted zone, it is not necessary to
+place CoreDNS and this plugin in the associated VPC network. In fact the private hosted zone could
+be created without any associated VPC and this plugin could still access the resource records under
+the hosted zone.

## Syntax

From c33fc9e3b091facdabaf028764ae3df5d85b53d3 Mon Sep 17 00:00:00 2001
From: Yevgeny Pats
Date: Sun, 18 Aug 2019 11:40:59 +0300
Subject: [PATCH 096/324] Add Continuous Fuzzing Integration to Fuzzit (#3093)

This feature introduces continuous fuzzing with the following features:

* Fuzzing: fuzz-targets are run continuously on master (the fuzzers are
updated every time new code is pushed to master)
* Regression: In addition to unit-tests travis runs all fuzz targets through
the generated corpus to catch bugs early on in the CI process before merge.
---
 .travis.yml | 17 ++++++++++++-----
 Makefile | 20 ++++++++++++++++++++
 Makefile.fuzz | 13 +++++++++++++
 README.md | 1 +
 4 files changed, 46 insertions(+), 5 deletions(-)

diff --git a/.travis.yml b/.travis.yml
index 66b321739da..e29b074b35a 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,5 +1,6 @@
 dist: xenial
-
+services:
+  - docker
 language: go
 go:
 - "1.12.x"
@@ -18,10 +19,16 @@ branches:
   - master
 
 env:
-  - TEST_TYPE=coverage
-  - TEST_TYPE=integration
-  - TEST_TYPE=core
-  - TEST_TYPE=plugin
+  global:
+    # This is FUZZIT_API_KEY
+    - secure:
"IGpZAyt1e5BZ1C4LeJG+GrgFZzaBQkJ3BX/+MwWN85aJSDk5gwThS53OCr/7RFgBKBgP9xBv9i9hAv0PxVaRE0ETIzjc0rQzceJIWiYKfFYQyscFahKfSiGsWP32rMlU3K67tA7yITS+Z8mMyVH9Ndr1Fg9AmLL+WfATdrd6dP8hzsUpaghKlnJee9TycrfamDpISzecdOY9xzxcwRyphZxuCc/n236Nt7f7Ccz0zx/Qa5igX6mjKZpUyBpS2u02GmNJTfc3W5SbTRP5bSJ+ozSkZZyG3tTpYmeN87AQJ/oG7rUEzqGLt78i7jSYAXghJZT06H/fHFsOKssCj1m0hYiarnGoGzXScLDqp2fpkyzilsUT+W0VgXTy2Nq+88Sideiy6UwDwpqHr5ktyoYFeSVB/aCTJl5oxDxBqs9dfeJSEAy7/AYy8kJoIE/yPYsBnGw10CAED4Rf5mfDgstkZRBdAO0xLBihkPsgza2975DVf27YSjJZ4eKrnR+G/aNCKycLQvWD/5c2bcLCJqyz0uMLQC/4LspS9b5bAKurzqFRdrD5q78NDcbodHelc7zBlFrRwGFCUjXTbQoU6r+1FA8y2Z+n1bd7mIF1JBVHurYAygyYXOcry870hyucGojonvdgBvHp6txeYyPU14VvTNwkF2mddpBCvoSTSPZ5X64=" + matrix: + - TEST_TYPE=coverage + - TEST_TYPE=integration + - TEST_TYPE=core + - TEST_TYPE=plugin + - TEST_TYPE=fuzzit FUZZIT_TYPE=local-regression + - TEST_TYPE=fuzzit FUZZIT_TYPE=fuzzing # In the Travis VM-based build environment, IPv6 networking is not # enabled by default. The sysctl operations below enable IPv6. 
diff --git a/Makefile b/Makefile index 135fe3f9e2d..619a431e529 100644 --- a/Makefile +++ b/Makefile @@ -44,6 +44,26 @@ ifeq ($(TEST_TYPE),coverage) fi; \ done endif +ifeq ($(TEST_TYPE),fuzzit) + # skip fuzzing for PR + if [ "$(TRAVIS_PULL_REQUEST)" = "false" ] || [ "$(FUZZIT_TYPE)" = "local-regression" ] ; then \ + export GO111MODULE=off; \ + go get -u github.com/dvyukov/go-fuzz/go-fuzz-build; \ + go get -u -v .; \ + cd ../../go-acme/lego; \ + git checkout v2.5.0; \ + cd ../../coredns/coredns; \ + LIBFUZZER=YES make -f Makefile.fuzz cache chaos file rewrite whoami corefile; \ + wget -O fuzzit https://github.com/fuzzitdev/fuzzit/releases/download/v2.4.27/fuzzit_Linux_x86_64; \ + chmod a+x fuzzit; \ + ./fuzzit create job --type $(FUZZIT_TYPE) coredns/cache ./cache; \ + ./fuzzit create job --type $(FUZZIT_TYPE) coredns/chaos ./chaos; \ + ./fuzzit create job --type $(FUZZIT_TYPE) coredns/file ./file; \ + ./fuzzit create job --type $(FUZZIT_TYPE) coredns/rewrite ./rewrite; \ + ./fuzzit create job --type $(FUZZIT_TYPE) coredns/whoami ./whoami; \ + ./fuzzit create job --type $(FUZZIT_TYPE) coredns/corefile ./corefile; \ + fi; +endif core/plugin/zplugin.go core/dnsserver/zdirectives.go: plugin.cfg GO111MODULE=on go generate coredns.go diff --git a/Makefile.fuzz b/Makefile.fuzz index 666f4c93d89..b2340c42020 100644 --- a/Makefile.fuzz +++ b/Makefile.fuzz @@ -14,6 +14,7 @@ #$ go get github.com/dvyukov/go-fuzz/go-fuzz-build REPO:="github.com/coredns/coredns" +# set LIBFUZZER=YES to build libfuzzer compatible targets FUZZ:=$(dir $(wildcard plugin/*/fuzz.go)) # plugin/cache/ PLUGINS:=$(foreach f,$(FUZZ),$(subst plugin, ,$(f:/=))) # > /cache @@ -25,13 +26,25 @@ echo: .PHONY: $(PLUGINS) $(PLUGINS): echo +ifeq ($(LIBFUZZER), YES) + go-fuzz-build -tags fuzz -libfuzzer -o $(@).a ./plugin/$(@) + clang -fsanitize=fuzzer $(@).a -o $(@) +else go-fuzz-build -tags fuzz $(REPO)/plugin/$(@) go-fuzz -bin=./$(@)-fuzz.zip -workdir=fuzz/$(@) +endif + .PHONY: 
corefile corefile: +ifeq ($(LIBFUZZER), YES) + go-fuzz-build -tags fuzz -libfuzzer -o $(@).a ./test + clang -fsanitize=fuzzer $(@).a -o $(@) +else go-fuzz-build -tags fuzz $(REPO)/test go-fuzz -bin=./test-fuzz.zip -workdir=fuzz/$(@) +endif + .PHONY: clean diff --git a/README.md b/README.md index 22208502f4d..b587b2e0243 100644 --- a/README.md +++ b/README.md @@ -2,6 +2,7 @@ [![Documentation](https://img.shields.io/badge/godoc-reference-blue.svg)](https://godoc.org/github.com/coredns/coredns) [![Build Status](https://img.shields.io/travis/coredns/coredns/master.svg?label=build)](https://travis-ci.org/coredns/coredns) +[![fuzzit](https://app.fuzzit.dev/badge?org_id=coredns&branch=master)](https://fuzzit.dev) [![Code Coverage](https://img.shields.io/codecov/c/github/coredns/coredns/master.svg)](https://codecov.io/github/coredns/coredns?branch=master) [![Docker Pulls](https://img.shields.io/docker/pulls/coredns/coredns.svg)](https://hub.docker.com/r/coredns/coredns) [![Go Report Card](https://goreportcard.com/badge/github.com/coredns/coredns)](https://goreportcard.com/report/coredns/coredns) From bbf148360b2bd35b9a1e358fdf7984a33b15a8d5 Mon Sep 17 00:00:00 2001 From: AllenZMC Date: Mon, 19 Aug 2019 00:53:46 +0800 Subject: [PATCH 097/324] fix wrong spells in coredns-007.md (#3142) --- notes/coredns-007.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/notes/coredns-007.md b/notes/coredns-007.md index f7ea3b45d43..d723c5fef6c 100644 --- a/notes/coredns-007.md +++ b/notes/coredns-007.md @@ -30,7 +30,7 @@ Back to the release. * gRPC and TLS are made first class citizens. See [the zone specification](https://github.com/coredns/coredns/blob/master/README.md#zone-specification) on how to use it. TL;DR using `grpc://` makes the server talk gRPC. The `tls` directive is used to - specify TLS certifcates. + specify TLS certificates. * Zipkin tracing can be enabled for all plugin. 
# Plugins From 6402cef3371b6a5716dd032e2484d865cf25338a Mon Sep 17 00:00:00 2001 From: Yong Tang Date: Sun, 18 Aug 2019 14:41:51 -0700 Subject: [PATCH 098/324] Move federation plugin to github.com/coredns/federation (#3139) * Remove federation Signed-off-by: Yong Tang * Rebuild and point to github.com/coredns/federation Signed-off-by: Yong Tang * Export `localNodeName` => `LocalNodeName`, to be used by federation (until deprecation) Signed-off-by: Yong Tang * Remove plugin/kubernetes/federation.go (=> kubernetes/federation repo) Signed-off-by: Yong Tang * Update github.com/coredns/federation Signed-off-by: Yong Tang * sticker-ci fix Signed-off-by: Yong Tang --- core/plugin/zplugin.go | 2 +- go.mod | 1 + go.sum | 2 + plugin.cfg | 2 +- plugin/federation/OWNERS | 6 - plugin/federation/README.md | 39 ------ plugin/federation/federation.go | 147 ----------------------- plugin/federation/federation_test.go | 119 ------------------ plugin/federation/kubernetes_api_test.go | 140 --------------------- plugin/federation/log_test.go | 5 - plugin/federation/setup.go | 94 --------------- plugin/federation/setup_test.go | 65 ---------- plugin/kubernetes/federation.go | 51 -------- plugin/kubernetes/local.go | 3 +- 14 files changed, 7 insertions(+), 669 deletions(-) delete mode 100644 plugin/federation/OWNERS delete mode 100644 plugin/federation/README.md delete mode 100644 plugin/federation/federation.go delete mode 100644 plugin/federation/federation_test.go delete mode 100644 plugin/federation/kubernetes_api_test.go delete mode 100644 plugin/federation/log_test.go delete mode 100644 plugin/federation/setup.go delete mode 100644 plugin/federation/setup_test.go delete mode 100644 plugin/kubernetes/federation.go diff --git a/core/plugin/zplugin.go b/core/plugin/zplugin.go index 104a3e68afd..e761f858629 100644 --- a/core/plugin/zplugin.go +++ b/core/plugin/zplugin.go @@ -20,7 +20,6 @@ import ( _ "github.com/coredns/coredns/plugin/erratic" _ 
"github.com/coredns/coredns/plugin/errors" _ "github.com/coredns/coredns/plugin/etcd" - _ "github.com/coredns/coredns/plugin/federation" _ "github.com/coredns/coredns/plugin/file" _ "github.com/coredns/coredns/plugin/forward" _ "github.com/coredns/coredns/plugin/grpc" @@ -45,4 +44,5 @@ import ( _ "github.com/coredns/coredns/plugin/tls" _ "github.com/coredns/coredns/plugin/trace" _ "github.com/coredns/coredns/plugin/whoami" + _ "github.com/coredns/federation" ) diff --git a/go.mod b/go.mod index 99828467b68..2632ccd0780 100644 --- a/go.mod +++ b/go.mod @@ -12,6 +12,7 @@ require ( github.com/Shopify/sarama v1.21.0 // indirect github.com/aws/aws-sdk-go v1.22.3 github.com/caddyserver/caddy v1.0.1 + github.com/coredns/federation v0.0.0-20190818181423-e032b096babe github.com/coreos/bbolt v1.3.2 // indirect github.com/coreos/etcd v3.3.13+incompatible github.com/coreos/go-semver v0.2.0 // indirect diff --git a/go.sum b/go.sum index 860fb298fa2..aad90f981c5 100644 --- a/go.sum +++ b/go.sum @@ -78,6 +78,8 @@ github.com/census-instrumentation/opencensus-proto v0.2.0 h1:LzQXZOgg4CQfE6bFvXG github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cheekybits/genny v0.0.0-20170328200008-9127e812e1e9/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/coredns/federation v0.0.0-20190818181423-e032b096babe h1:ND08lR/TclI9W4dScCwdRESOacCCdF3FkuB5pBIOv1U= +github.com/coredns/federation v0.0.0-20190818181423-e032b096babe/go.mod h1:MoqTEFX8GlnKkyq8eBCF94VzkNAOgjdlCJ+Pz/oCLPk= github.com/coreos/bbolt v1.3.2 h1:wZwiHHUieZCquLkDL0B8UhzreNWsPHooDAG3q34zk0s= github.com/coreos/bbolt v1.3.2/go.mod 
h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.13+incompatible h1:8F3hqu9fGYLBifCmRCJsicFqDx/D68Rt3q1JMazcgBQ= diff --git a/plugin.cfg b/plugin.cfg index be944753412..2bd07c55768 100644 --- a/plugin.cfg +++ b/plugin.cfg @@ -47,7 +47,7 @@ hosts:hosts route53:route53 azure:azure clouddns:clouddns -federation:federation +federation:github.com/coredns/federation k8s_external:k8s_external kubernetes:kubernetes file:file diff --git a/plugin/federation/OWNERS b/plugin/federation/OWNERS deleted file mode 100644 index 187c629c943..00000000000 --- a/plugin/federation/OWNERS +++ /dev/null @@ -1,6 +0,0 @@ -reviewers: - - chrisohaver - - miekg -approvers: - - chrisohaver - - miekg diff --git a/plugin/federation/README.md b/plugin/federation/README.md deleted file mode 100644 index 96ba213ed05..00000000000 --- a/plugin/federation/README.md +++ /dev/null @@ -1,39 +0,0 @@ -# federation - -## Name - -*federation* - enables federated queries to be resolved via the kubernetes plugin. - -## Description - -Enabling this plugin allows -[Federated](https://kubernetes.io/docs/tasks/federation/federation-service-discovery/) queries to be -resolved via the kubernetes plugin. - -Enabling *federation* without also having *kubernetes* is a noop. - -## Syntax - -~~~ -federation [ZONES...] { - NAME DOMAIN -} -~~~ - -* Each **NAME** and **DOMAIN** defines federation membership. One entry for each. A duplicate - **NAME** will silently overwrite any previous value. - -## Examples - -Here we handle all service requests in the `prod` and `stage` federations. - -~~~ -. { - kubernetes cluster.local - federation cluster.local { - prod prod.feddomain.com - staging staging.feddomain.com - } - forward . 
192.168.1.12 -} -~~~ diff --git a/plugin/federation/federation.go b/plugin/federation/federation.go deleted file mode 100644 index 7fe2a6f6285..00000000000 --- a/plugin/federation/federation.go +++ /dev/null @@ -1,147 +0,0 @@ -/* -Package federation implements kubernetes federation. It checks if the qname matches -a possible federation. If this is the case and the captured answer is an NXDOMAIN, -federation is performed. If this is not the case the original answer is returned. - -The federation label is always the 2nd to last once the zone is chopped of. For -instance "nginx.mynamespace.myfederation.svc.example.com" has "myfederation" as -the federation label. For federation to work we do a normal k8s lookup -*without* that label, if that comes back with NXDOMAIN or NODATA(??) we create -a federation record and return that. - -Federation is only useful in conjunction with the kubernetes plugin, without it is a noop. -*/ -package federation - -import ( - "context" - - "github.com/coredns/coredns/plugin" - "github.com/coredns/coredns/plugin/etcd/msg" - "github.com/coredns/coredns/plugin/pkg/dnsutil" - "github.com/coredns/coredns/plugin/pkg/nonwriter" - "github.com/coredns/coredns/plugin/pkg/upstream" - "github.com/coredns/coredns/request" - - "github.com/miekg/dns" -) - -// Federation contains the name to zone mapping used for federation in kubernetes. -type Federation struct { - f map[string]string - zones []string - Upstream *upstream.Upstream - - Next plugin.Handler - Federations Func -} - -// Func needs to be implemented by any plugin that implements -// federation. Right now this is only the kubernetes plugin. -type Func func(state request.Request, fname, fzone string) (msg.Service, error) - -// New returns a new federation. -func New() *Federation { - return &Federation{f: make(map[string]string)} -} - -// ServeDNS implements the plugin.Handle interface. 
-func (f *Federation) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (int, error) { - if f.Federations == nil { - return plugin.NextOrFailure(f.Name(), f.Next, ctx, w, r) - } - - state := request.Request{W: w, Req: r} - - zone := plugin.Zones(f.zones).Matches(state.Name()) - if zone == "" { - return plugin.NextOrFailure(f.Name(), f.Next, ctx, w, r) - } - - state.Zone = zone - - // Remove the federation label from the qname to see if something exists. - without, label := f.isNameFederation(state.Name(), state.Zone) - if without == "" { - return plugin.NextOrFailure(f.Name(), f.Next, ctx, w, r) - } - - qname := r.Question[0].Name - r.Question[0].Name = without - state.Clear() - - // Start the next plugin, but with a nowriter, capture the result, if NXDOMAIN - // perform federation, otherwise just write the result. - nw := nonwriter.New(w) - ret, err := plugin.NextOrFailure(f.Name(), f.Next, ctx, nw, r) - - if !plugin.ClientWrite(ret) { - // something went wrong - r.Question[0].Name = qname - return ret, err - } - - if m := nw.Msg; m.Rcode != dns.RcodeNameError { - // If positive answer we need to substitute the original qname in the answer. - m.Question[0].Name = qname - for _, a := range m.Answer { - a.Header().Name = qname - } - - w.WriteMsg(m) - - return dns.RcodeSuccess, nil - } - - // Still here, we've seen NXDOMAIN and need to perform federation. - service, err := f.Federations(state, label, f.f[label]) // state references Req which has updated qname - if err != nil { - r.Question[0].Name = qname - return dns.RcodeServerFailure, err - } - - r.Question[0].Name = qname - - m := new(dns.Msg) - m.SetReply(r) - m.Authoritative = true - - m.Answer = []dns.RR{service.NewCNAME(state.QName(), service.Host)} - - if f.Upstream != nil { - aRecord, err := f.Upstream.Lookup(ctx, state, service.Host, state.QType()) - if err == nil && aRecord != nil && len(aRecord.Answer) > 0 { - m.Answer = append(m.Answer, aRecord.Answer...) 
- } - } - - w.WriteMsg(m) - return dns.RcodeSuccess, nil -} - -// Name implements the plugin.Handle interface. -func (f *Federation) Name() string { return "federation" } - -// IsNameFederation checks the qname to see if it is a potential federation. The federation -// label is always the 2nd to last once the zone is chopped of. For instance -// "nginx.mynamespace.myfederation.svc.example.com" has "myfederation" as the federation label. -// IsNameFederation returns a new qname with the federation label and the label itself or two -// empty strings if there wasn't a hit. -func (f *Federation) isNameFederation(name, zone string) (string, string) { - base, _ := dnsutil.TrimZone(name, zone) - - // TODO(miek): dns.PrevLabel is better for memory, or dns.Split. - labels := dns.SplitDomainName(base) - ll := len(labels) - if ll < 2 { - return "", "" - } - - fed := labels[ll-2] - - if _, ok := f.f[fed]; ok { - without := dnsutil.Join(labels[:ll-2]...) + labels[ll-1] + "." + zone - return without, fed - } - return "", "" -} diff --git a/plugin/federation/federation_test.go b/plugin/federation/federation_test.go deleted file mode 100644 index 64d6272b7f7..00000000000 --- a/plugin/federation/federation_test.go +++ /dev/null @@ -1,119 +0,0 @@ -package federation - -import ( - "context" - "testing" - - "github.com/coredns/coredns/plugin/kubernetes" - "github.com/coredns/coredns/plugin/pkg/dnstest" - "github.com/coredns/coredns/plugin/test" - - "github.com/miekg/dns" -) - -func TestIsNameFederation(t *testing.T) { - tests := []struct { - fed string - qname string - expectedZone string - }{ - {"prod", "nginx.mynamespace.prod.svc.example.com.", "nginx.mynamespace.svc.example.com."}, - {"prod", "nginx.mynamespace.staging.svc.example.com.", ""}, - {"prod", "nginx.mynamespace.example.com.", ""}, - {"prod", "example.com.", ""}, - {"prod", "com.", ""}, - } - - fed := New() - for i, tc := range tests { - fed.f[tc.fed] = "test-name" - if x, _ := 
fed.isNameFederation(tc.qname, "example.com."); x != tc.expectedZone { - t.Errorf("Test %d, failed to get zone, expected %s, got %s", i, tc.expectedZone, x) - } - } -} - -func TestFederationKubernetes(t *testing.T) { - tests := []test.Case{ - { - // service exists so we return the IP address associated with it. - Qname: "svc1.testns.prod.svc.cluster.local.", Qtype: dns.TypeA, - Rcode: dns.RcodeSuccess, - Answer: []dns.RR{ - test.A("svc1.testns.prod.svc.cluster.local. 303 IN A 10.0.0.1"), - }, - }, - { - // service does not exist, do the federation dance. - Qname: "svc0.testns.prod.svc.cluster.local.", Qtype: dns.TypeA, - Rcode: dns.RcodeSuccess, - Answer: []dns.RR{ - test.CNAME("svc0.testns.prod.svc.cluster.local. 303 IN CNAME svc0.testns.prod.svc.fd-az.fd-r.federal.example."), - }, - }, - } - - k := kubernetes.New([]string{"cluster.local."}) - k.APIConn = &APIConnFederationTest{zone: "fd-az", region: "fd-r"} - - fed := New() - fed.zones = []string{"cluster.local."} - fed.Federations = k.Federations - fed.Next = k - fed.f = map[string]string{ - "prod": "federal.example.", - } - - ctx := context.TODO() - for i, tc := range tests { - m := tc.Msg() - - rec := dnstest.NewRecorder(&test.ResponseWriter{}) - _, err := fed.ServeDNS(ctx, rec, m) - if err != nil { - t.Errorf("Test %d, expected no error, got %v", i, err) - return - } - - resp := rec.Msg - if err := test.SortAndCheck(resp, tc); err != nil { - t.Error(err) - } - } -} - -func TestFederationKubernetesMissingLabels(t *testing.T) { - tests := []test.Case{ - { - // service does not exist, do the federation dance. - Qname: "svc0.testns.prod.svc.cluster.local.", Qtype: dns.TypeA, - Rcode: dns.RcodeSuccess, - Answer: []dns.RR{ - test.CNAME("svc0.testns.prod.svc.cluster.local. 
303 IN CNAME svc0.testns.prod.svc.fd-az.fd-r.federal.example."), - }, - }, - } - - k := kubernetes.New([]string{"cluster.local."}) - k.APIConn = &APIConnFederationTest{zone: "", region: ""} - - fed := New() - fed.zones = []string{"cluster.local."} - fed.Federations = k.Federations - fed.Next = k - fed.f = map[string]string{ - "prod": "federal.example.", - } - - ctx := context.TODO() - for _, tc := range tests { - m := tc.Msg() - - rec := dnstest.NewRecorder(&test.ResponseWriter{}) - _, err := fed.ServeDNS(ctx, rec, m) - if err == nil { - t.Errorf("Expected an error") - return - } - } -} diff --git a/plugin/federation/kubernetes_api_test.go b/plugin/federation/kubernetes_api_test.go deleted file mode 100644 index 35b058b076f..00000000000 --- a/plugin/federation/kubernetes_api_test.go +++ /dev/null @@ -1,140 +0,0 @@ -package federation - -import ( - "github.com/coredns/coredns/plugin/kubernetes" - "github.com/coredns/coredns/plugin/kubernetes/object" - - api "k8s.io/api/core/v1" - meta "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -type APIConnFederationTest struct { - zone, region string -} - -func (APIConnFederationTest) HasSynced() bool { return true } -func (APIConnFederationTest) Run() { return } -func (APIConnFederationTest) Stop() error { return nil } -func (APIConnFederationTest) SvcIndexReverse(string) []*object.Service { return nil } -func (APIConnFederationTest) EpIndexReverse(string) []*object.Endpoints { return nil } -func (APIConnFederationTest) Modified() int64 { return 0 } - -func (APIConnFederationTest) PodIndex(string) []*object.Pod { - return []*object.Pod{ - {Namespace: "podns", PodIP: "10.240.0.1"}, // Remote IP set in test.ResponseWriter - } -} - -func (APIConnFederationTest) SvcIndex(string) []*object.Service { - svcs := []*object.Service{ - { - Name: "svc1", - Namespace: "testns", - ClusterIP: "10.0.0.1", - Ports: []api.ServicePort{ - {Name: "http", Protocol: "tcp", Port: 80}, - }, - }, - { - Name: "hdls1", - Namespace: "testns", 
- ClusterIP: api.ClusterIPNone, - }, - { - Name: "external", - Namespace: "testns", - ExternalName: "ext.interwebs.test", - Ports: []api.ServicePort{ - {Name: "http", Protocol: "tcp", Port: 80}, - }, - }, - } - return svcs -} - -func (APIConnFederationTest) ServiceList() []*object.Service { - svcs := []*object.Service{ - { - Name: "svc1", - Namespace: "testns", - ClusterIP: "10.0.0.1", - Ports: []api.ServicePort{ - {Name: "http", Protocol: "tcp", Port: 80}, - }, - }, - { - Name: "hdls1", - Namespace: "testns", - ClusterIP: api.ClusterIPNone, - }, - { - Name: "external", - Namespace: "testns", - ExternalName: "ext.interwebs.test", - Ports: []api.ServicePort{ - {Name: "http", Protocol: "tcp", Port: 80}, - }, - }, - } - return svcs -} - -func (APIConnFederationTest) EpIndex(string) []*object.Endpoints { - eps := []*object.Endpoints{ - { - Subsets: []object.EndpointSubset{ - { - Addresses: []object.EndpointAddress{ - {IP: "172.0.0.1", Hostname: "ep1a"}, - }, - Ports: []object.EndpointPort{ - {Port: 80, Protocol: "tcp", Name: "http"}, - }, - }, - }, - Name: "svc1", - Namespace: "testns", - }, - } - return eps -} - -func (APIConnFederationTest) EndpointsList() []*object.Endpoints { - eps := []*object.Endpoints{ - { - Subsets: []object.EndpointSubset{ - { - Addresses: []object.EndpointAddress{ - {IP: "172.0.0.1", Hostname: "ep1a"}, - }, - Ports: []object.EndpointPort{ - {Port: 80, Protocol: "tcp", Name: "http"}, - }, - }, - }, - Name: "svc1", - Namespace: "testns", - }, - } - return eps -} - -func (a APIConnFederationTest) GetNodeByName(name string) (*api.Node, error) { - return &api.Node{ - ObjectMeta: meta.ObjectMeta{ - Name: "test.node.foo.bar", - Labels: map[string]string{ - kubernetes.LabelRegion: a.region, - kubernetes.LabelZone: a.zone, - }, - }, - }, nil -} - -func (APIConnFederationTest) GetNamespaceByName(name string) (*api.Namespace, error) { - return &api.Namespace{ - ObjectMeta: meta.ObjectMeta{ - Name: name, - }, - }, nil -} diff --git 
a/plugin/federation/log_test.go b/plugin/federation/log_test.go deleted file mode 100644 index fa94817265d..00000000000 --- a/plugin/federation/log_test.go +++ /dev/null @@ -1,5 +0,0 @@ -package federation - -import clog "github.com/coredns/coredns/plugin/pkg/log" - -func init() { clog.Discard() } diff --git a/plugin/federation/setup.go b/plugin/federation/setup.go deleted file mode 100644 index fde50853d77..00000000000 --- a/plugin/federation/setup.go +++ /dev/null @@ -1,94 +0,0 @@ -package federation - -import ( - "fmt" - - "github.com/coredns/coredns/core/dnsserver" - "github.com/coredns/coredns/plugin" - "github.com/coredns/coredns/plugin/kubernetes" - "github.com/coredns/coredns/plugin/pkg/upstream" - "github.com/miekg/dns" - - "github.com/caddyserver/caddy" -) - -func init() { - caddy.RegisterPlugin("federation", caddy.Plugin{ - ServerType: "dns", - Action: setup, - }) -} - -func setup(c *caddy.Controller) error { - fed, err := federationParse(c) - if err != nil { - return plugin.Error("federation", err) - } - - // Do this in OnStartup, so all plugin has been initialized. - c.OnStartup(func() error { - m := dnsserver.GetConfig(c).Handler("kubernetes") - if m == nil { - return nil - } - if x, ok := m.(*kubernetes.Kubernetes); ok { - fed.Federations = x.Federations - } - return nil - }) - - dnsserver.GetConfig(c).AddPlugin(func(next plugin.Handler) plugin.Handler { - fed.Next = next - return fed - }) - - return nil -} - -func federationParse(c *caddy.Controller) (*Federation, error) { - fed := New() - fed.Upstream = upstream.New() - - for c.Next() { - // federation [zones..] 
- zones := c.RemainingArgs() - var origins []string - if len(zones) > 0 { - origins = make([]string, len(zones)) - copy(origins, zones) - } else { - origins = make([]string, len(c.ServerBlockKeys)) - copy(origins, c.ServerBlockKeys) - } - - for c.NextBlock() { - x := c.Val() - switch x { - case "upstream": - // remove soon - c.RemainingArgs() - default: - args := c.RemainingArgs() - if x := len(args); x != 1 { - return fed, fmt.Errorf("need two arguments for federation, got %d", x) - } - - fed.f[x] = dns.Fqdn(args[0]) - } - } - - for i := range origins { - origins[i] = plugin.Host(origins[i]).Normalize() - } - - fed.zones = origins - - if len(fed.f) == 0 { - return fed, fmt.Errorf("at least one name to zone federation expected") - } - - return fed, nil - } - - return fed, nil -} diff --git a/plugin/federation/setup_test.go b/plugin/federation/setup_test.go deleted file mode 100644 index 6aed5cce772..00000000000 --- a/plugin/federation/setup_test.go +++ /dev/null @@ -1,65 +0,0 @@ -package federation - -import ( - "testing" - - "github.com/caddyserver/caddy" -) - -func TestSetup(t *testing.T) { - tests := []struct { - input string - shouldErr bool - expectedLen int - expectedNameZone []string // contains only entry for now - }{ - // ok - {`federation { - prod prod.example.org - }`, false, 1, []string{"prod", "prod.example.org."}}, - - {`federation { - staging staging.example.org - prod prod.example.org - }`, false, 2, []string{"prod", "prod.example.org."}}, - {`federation { - staging staging.example.org - prod prod.example.org - }`, false, 2, []string{"staging", "staging.example.org."}}, - {`federation example.com { - staging staging.example.org - prod prod.example.org - }`, false, 2, []string{"staging", "staging.example.org."}}, - // errors - {`federation { - }`, true, 0, []string{}}, - {`federation { - staging - }`, true, 0, []string{}}, - } - for i, test := range tests { - c := caddy.NewTestController("dns", test.input) - fed, err := federationParse(c) - 
if test.shouldErr && err == nil { - t.Errorf("Test %v: Expected error but found nil", i) - continue - } else if !test.shouldErr && err != nil { - t.Errorf("Test %v: Expected no error but found error: %v", i, err) - continue - } - if test.shouldErr && err != nil { - continue - } - - if x := len(fed.f); x != test.expectedLen { - t.Errorf("Test %v: Expected map length of %d, got: %d", i, test.expectedLen, x) - } - if x, ok := fed.f[test.expectedNameZone[0]]; !ok { - t.Errorf("Test %v: Expected name for %s, got nothing", i, test.expectedNameZone[0]) - } else { - if x != test.expectedNameZone[1] { - t.Errorf("Test %v: Expected zone: %s, got %s", i, test.expectedNameZone[1], x) - } - } - } -} diff --git a/plugin/kubernetes/federation.go b/plugin/kubernetes/federation.go deleted file mode 100644 index bf169b911d5..00000000000 --- a/plugin/kubernetes/federation.go +++ /dev/null @@ -1,51 +0,0 @@ -package kubernetes - -import ( - "errors" - - "github.com/coredns/coredns/plugin/etcd/msg" - "github.com/coredns/coredns/plugin/pkg/dnsutil" - "github.com/coredns/coredns/request" -) - -// The federation node.Labels keys used. -const ( - // TODO: Do not hardcode these labels. Pull them out of the API instead. - // - // We can get them via .... - // import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - // metav1.LabelZoneFailureDomain - // metav1.LabelZoneRegion - // - // But importing above breaks coredns with flag collision of 'log_dir' - - LabelZone = "failure-domain.beta.kubernetes.io/zone" - LabelRegion = "failure-domain.beta.kubernetes.io/region" -) - -// Federations is used from the federations plugin to return the service that should be -// returned as a CNAME for federation(s) to work. 
-func (k *Kubernetes) Federations(state request.Request, fname, fzone string) (msg.Service, error) {
-	nodeName := k.localNodeName()
-	node, err := k.APIConn.GetNodeByName(nodeName)
-	if err != nil {
-		return msg.Service{}, err
-	}
-	r, err := parseRequest(state)
-	if err != nil {
-		return msg.Service{}, err
-	}
-
-	lz := node.Labels[LabelZone]
-	lr := node.Labels[LabelRegion]
-
-	if lz == "" || lr == "" {
-		return msg.Service{}, errors.New("local node missing zone/region labels")
-	}
-
-	if r.endpoint == "" {
-		return msg.Service{Host: dnsutil.Join(r.service, r.namespace, fname, r.podOrSvc, lz, lr, fzone)}, nil
-	}
-
-	return msg.Service{Host: dnsutil.Join(r.endpoint, r.service, r.namespace, fname, r.podOrSvc, lz, lr, fzone)}, nil
-}
diff --git a/plugin/kubernetes/local.go b/plugin/kubernetes/local.go
index e15fec497f1..c5918a3060e 100644
--- a/plugin/kubernetes/local.go
+++ b/plugin/kubernetes/local.go
@@ -21,7 +21,8 @@ func localPodIP() net.IP {
 	return nil
 }
 
-func (k *Kubernetes) localNodeName() string {
+// LocalNodeName is exclusively used in federation plugin, will be deprecated later.
+func (k *Kubernetes) LocalNodeName() string {
 	localIP := k.interfaceAddrsFunc()
 	if localIP == nil {
 		return ""

From 6fa61119bd4e62808b7c3ad6a417ca4a9ea5fb83 Mon Sep 17 00:00:00 2001
From: Yong Tang
Date: Mon, 19 Aug 2019 00:02:08 -0700
Subject: [PATCH 099/324] Update build command inside docker (#3145)

Since we use golang 1.12 and gomod, it is not necessary to match the
import path to `/go/src/github.com/coredns/coredns` in order to build
coredns. For that reason the build command inside docker could be
simplified with:

```
$ docker run --rm -i -t -v $PWD:/v -w /v golang:1.12 make
```

Signed-off-by: Yong Tang
---
 README.md | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/README.md b/README.md
index b587b2e0243..1a38c402c77 100644
--- a/README.md
+++ b/README.md
@@ -69,8 +69,7 @@ CoreDNS requires Go to compile.
However, if you already have docker installed an setup a Go environment, you could build CoreDNS easily: ``` -$ docker run --rm -i -t -v $PWD:/go/src/github.com/coredns/coredns \ - -w /go/src/github.com/coredns/coredns golang:1.12 make +$ docker run --rm -i -t -v $PWD:/v -w /v golang:1.12 make ``` The above command alone will have `coredns` binary generated. From 6f028d04278fb9dc9a67e125c507ee5252b8d859 Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Mon, 19 Aug 2019 08:06:25 +0000 Subject: [PATCH 100/324] fuzz: some cleanups (#3143) * fuzz: some cleanups Signed-off-by: Miek Gieben * smaller Signed-off-by: Miek Gieben * documentation Signed-off-by: Miek Gieben * comments Signed-off-by: Miek Gieben --- Makefile | 27 +++++++++++---------------- Makefile.fuzz | 18 ++++++++++++------ plugin/pkg/fuzz/do.go | 2 +- 3 files changed, 24 insertions(+), 23 deletions(-) diff --git a/Makefile b/Makefile index 619a431e529..648b8f38bfc 100644 --- a/Makefile +++ b/Makefile @@ -47,22 +47,17 @@ endif ifeq ($(TEST_TYPE),fuzzit) # skip fuzzing for PR if [ "$(TRAVIS_PULL_REQUEST)" = "false" ] || [ "$(FUZZIT_TYPE)" = "local-regression" ] ; then \ - export GO111MODULE=off; \ - go get -u github.com/dvyukov/go-fuzz/go-fuzz-build; \ - go get -u -v .; \ - cd ../../go-acme/lego; \ - git checkout v2.5.0; \ - cd ../../coredns/coredns; \ - LIBFUZZER=YES make -f Makefile.fuzz cache chaos file rewrite whoami corefile; \ - wget -O fuzzit https://github.com/fuzzitdev/fuzzit/releases/download/v2.4.27/fuzzit_Linux_x86_64; \ - chmod a+x fuzzit; \ - ./fuzzit create job --type $(FUZZIT_TYPE) coredns/cache ./cache; \ - ./fuzzit create job --type $(FUZZIT_TYPE) coredns/chaos ./chaos; \ - ./fuzzit create job --type $(FUZZIT_TYPE) coredns/file ./file; \ - ./fuzzit create job --type $(FUZZIT_TYPE) coredns/rewrite ./rewrite; \ - ./fuzzit create job --type $(FUZZIT_TYPE) coredns/whoami ./whoami; \ - ./fuzzit create job --type $(FUZZIT_TYPE) coredns/corefile ./corefile; \ - fi; + 
export GO111MODULE=off; \ + go get -u github.com/dvyukov/go-fuzz/go-fuzz-build; \ + go get -u -v .; \ + cd ../../go-acme/lego && git checkout v2.5.0; \ + cd ../../coredns/coredns; \ + LIBFUZZER=YES $(MAKE) -f Makefile.fuzz all; \ + $(MAKE) -sf Makefile.fuzz fuzzit; \ + for i in `$(MAKE) -sf Makefile.fuzz echo`; do echo $$i; \ + ./fuzzit create job --type $(FUZZIT_TYPE) coredns/$$i ./$$i; \ + done; \ + fi; endif core/plugin/zplugin.go core/dnsserver/zdirectives.go: plugin.cfg diff --git a/Makefile.fuzz b/Makefile.fuzz index b2340c42020..4d518972002 100644 --- a/Makefile.fuzz +++ b/Makefile.fuzz @@ -1,5 +1,7 @@ # Makefile for fuzzing # +# With https://app.fuzzit.dev/ we are continuously fuzzing CoreDNS. +# # Use go-fuzz and needs the tools installed. For each fuzz.go in a plugin's directory # you can start the fuzzing with: make -f Makefile.fuzz # e.g. @@ -9,11 +11,13 @@ # Each plugin that wants to join the fuzzing fray only needs to add a fuzz.go that calls # the plugins's ServeDNS and used the plugin/pkg/fuzz for the Do function. # -# Installing go-fuzz -#$ go get github.com/dvyukov/go-fuzz/go-fuzz -#$ go get github.com/dvyukov/go-fuzz/go-fuzz-build +# Installing go-fuzz is very tricky because it does not support Go modules, see the `Makefile` +# for the current trickery. 
The following may do the trick: +# +# GO111MODULE=off go get github.com/dvyukov/go-fuzz/go-fuzz-build REPO:="github.com/coredns/coredns" +FUZZIT:=v2.4.28 # set LIBFUZZER=YES to build libfuzzer compatible targets FUZZ:=$(dir $(wildcard plugin/*/fuzz.go)) # plugin/cache/ @@ -22,7 +26,9 @@ PLUGINS:=$(foreach f,$(PLUGINS),$(subst /, ,$(f))) # > cache .PHONY: echo echo: - @echo fuzz targets: $(PLUGINS) + @echo $(PLUGINS) corefile + +all: $(PLUGINS) corefile .PHONY: $(PLUGINS) $(PLUGINS): echo @@ -34,7 +40,6 @@ else go-fuzz -bin=./$(@)-fuzz.zip -workdir=fuzz/$(@) endif - .PHONY: corefile corefile: ifeq ($(LIBFUZZER), YES) @@ -45,7 +50,8 @@ else go-fuzz -bin=./test-fuzz.zip -workdir=fuzz/$(@) endif - +fuzzit: + wget -O fuzzit https://github.com/fuzzitdev/fuzzit/releases/download/$(FUZZIT)/fuzzit_Linux_x86_64 && chmod +x fuzzit .PHONY: clean clean: diff --git a/plugin/pkg/fuzz/do.go b/plugin/pkg/fuzz/do.go index dfb046b25bc..e2bc0edee4b 100644 --- a/plugin/pkg/fuzz/do.go +++ b/plugin/pkg/fuzz/do.go @@ -10,7 +10,7 @@ import ( "github.com/miekg/dns" ) -// Do will fuzz p - used by gofuzz. See Maefile.fuzz for comments and context. +// Do will fuzz p - used by gofuzz. See Makefile.fuzz for comments and context. func Do(p plugin.Handler, data []byte) int { ctx := context.TODO() ret := 1 From 81bba8015977952b1d923f2368371ff314d0d063 Mon Sep 17 00:00:00 2001 From: Xigang Wang Date: Mon, 19 Aug 2019 21:34:09 +0800 Subject: [PATCH 101/324] Change the ToEndpoints method comment to replace the Service with Endpoints (#3146) --- plugin/kubernetes/object/endpoint.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugin/kubernetes/object/endpoint.go b/plugin/kubernetes/object/endpoint.go index aa93b4ceb50..bd0afeef3dc 100644 --- a/plugin/kubernetes/object/endpoint.go +++ b/plugin/kubernetes/object/endpoint.go @@ -43,7 +43,7 @@ type EndpointPort struct { // EndpointsKey return a string using for the index. 
func EndpointsKey(name, namespace string) string { return name + "." + namespace } -// ToEndpoints converts an api.Service to a *Service. +// ToEndpoints converts an api.Endpoints to a *Endpoints. func ToEndpoints(obj interface{}) interface{} { end, ok := obj.(*api.Endpoints) if !ok { From 8ab83a175a563b4dac2b8c8b66c30fa68578c115 Mon Sep 17 00:00:00 2001 From: AllenZMC Date: Mon, 19 Aug 2019 21:36:59 +0800 Subject: [PATCH 102/324] fix mis-spelling in item.go (#3156) --- plugin/cache/item.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugin/cache/item.go b/plugin/cache/item.go index edc7610a41e..3071f1512e0 100644 --- a/plugin/cache/item.go +++ b/plugin/cache/item.go @@ -54,7 +54,7 @@ func (i *item) toMsg(m *dns.Msg, now time.Time) *dns.Msg { m1 := new(dns.Msg) m1.SetReply(m) - // Set this to true as some DNS clients disgard the *entire* packet when it's non-authoritative. + // Set this to true as some DNS clients discard the *entire* packet when it's non-authoritative. // This is probably not according to spec, but the bit itself is not super useful as this point, so // just set it to true. m1.Authoritative = true From fe0f3d9818ce97500b632d4b4422d102008f5aa2 Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 19 Aug 2019 07:53:09 -0700 Subject: [PATCH 103/324] build(deps): bump github.com/caddyserver/caddy from 1.0.1 to 1.0.3 (#3148) Bumps [github.com/caddyserver/caddy](https://github.com/caddyserver/caddy) from 1.0.1 to 1.0.3. 
- [Release notes](https://github.com/caddyserver/caddy/releases) - [Commits](https://github.com/caddyserver/caddy/compare/v1.0.1...v1.0.3) Signed-off-by: dependabot-preview[bot] --- go.mod | 2 +- go.sum | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index 2632ccd0780..866284a5bbb 100644 --- a/go.mod +++ b/go.mod @@ -11,7 +11,7 @@ require ( github.com/Azure/go-autorest/autorest/to v0.2.0 // indirect github.com/Shopify/sarama v1.21.0 // indirect github.com/aws/aws-sdk-go v1.22.3 - github.com/caddyserver/caddy v1.0.1 + github.com/caddyserver/caddy v1.0.3 github.com/coredns/federation v0.0.0-20190818181423-e032b096babe github.com/coreos/bbolt v1.3.2 // indirect github.com/coreos/etcd v3.3.13+incompatible diff --git a/go.sum b/go.sum index aad90f981c5..0857ec8677c 100644 --- a/go.sum +++ b/go.sum @@ -71,6 +71,8 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r github.com/bifurcation/mint v0.0.0-20180715133206-93c51c6ce115/go.mod h1:zVt7zX3K/aDCk9Tj+VM7YymsX66ERvzCJzw8rFCX2JU= github.com/caddyserver/caddy v1.0.1 h1:oor6ep+8NoJOabpFXhvjqjfeldtw1XSzfISVrbfqTKo= github.com/caddyserver/caddy v1.0.1/go.mod h1:G+ouvOY32gENkJC+jhgl62TyhvqEsFaDiZ4uw0RzP1E= +github.com/caddyserver/caddy v1.0.3 h1:i9gRhBgvc5ifchwWtSe7pDpsdS9+Q0Rw9oYQmYUTw1w= +github.com/caddyserver/caddy v1.0.3/go.mod h1:G+ouvOY32gENkJC+jhgl62TyhvqEsFaDiZ4uw0RzP1E= github.com/cenkalti/backoff v2.1.1+incompatible h1:tKJnvO2kl0zmb/jA5UKAt4VoEVw1qxKWjE/Bpp46npY= github.com/cenkalti/backoff v2.1.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/census-instrumentation/opencensus-proto v0.1.0-0.20181214143942-ba49f56771b8/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= From ee84d4555ffd9f1c98bdc8e9eb7312817add36d4 Mon Sep 17 00:00:00 2001 From: 
"dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 19 Aug 2019 07:53:25 -0700 Subject: [PATCH 104/324] build(deps): bump github.com/Azure/go-autorest/autorest (#3149) Bumps [github.com/Azure/go-autorest/autorest](https://github.com/Azure/go-autorest) from 0.8.0 to 0.9.0. - [Release notes](https://github.com/Azure/go-autorest/releases) - [Changelog](https://github.com/Azure/go-autorest/blob/master/CHANGELOG.md) - [Commits](https://github.com/Azure/go-autorest/compare/autorest/v0.8.0...autorest/v0.9.0) Signed-off-by: dependabot-preview[bot] --- go.mod | 2 +- go.sum | 6 ++++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index 866284a5bbb..d034595f455 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,7 @@ require ( cloud.google.com/go v0.41.0 // indirect contrib.go.opencensus.io/exporter/ocagent v0.4.12 // indirect github.com/Azure/azure-sdk-for-go v31.1.0+incompatible - github.com/Azure/go-autorest/autorest v0.8.0 + github.com/Azure/go-autorest/autorest v0.9.0 github.com/Azure/go-autorest/autorest/azure/auth v0.1.0 github.com/Azure/go-autorest/autorest/to v0.2.0 // indirect github.com/Shopify/sarama v1.21.0 // indirect diff --git a/go.sum b/go.sum index 0857ec8677c..8d8df10f8ab 100644 --- a/go.sum +++ b/go.sum @@ -15,11 +15,15 @@ github.com/Azure/go-autorest/autorest v0.5.0 h1:Mlm9qy2fpQ9MvfyI41G2Zf5B4CsgjjNb github.com/Azure/go-autorest/autorest v0.5.0/go.mod h1:9HLKlQjVBH6U3oDfsXOeVc56THsLPw1L03yban4xThw= github.com/Azure/go-autorest/autorest v0.8.0 h1:hdhcXqJ/T98aigNKEs1LjXlGbxWGSIP0rA/GMc15UPA= github.com/Azure/go-autorest/autorest v0.8.0/go.mod h1:io2I5Mipuszn4buXaxt/RzC43Yf6aBtkgyUFF2HBdLY= +github.com/Azure/go-autorest/autorest v0.9.0 h1:MRvx8gncNaXJqOoLmhNjUAKh33JJF8LyxPhomEtOsjs= +github.com/Azure/go-autorest/autorest v0.9.0/go.mod 
h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= github.com/Azure/go-autorest/autorest/adal v0.1.0/go.mod h1:MeS4XhScH55IST095THyTxElntu7WqB7pNbZo8Q5G3E= github.com/Azure/go-autorest/autorest/adal v0.2.0 h1:7IBDu1jgh+ADHXnEYExkV9RE/ztOOlxdACkkPRthGKw= github.com/Azure/go-autorest/autorest/adal v0.2.0/go.mod h1:MeS4XhScH55IST095THyTxElntu7WqB7pNbZo8Q5G3E= github.com/Azure/go-autorest/autorest/adal v0.4.0 h1:e4yveyEm5WvJDtmxJCY8fW1d3MY7vlQw/dVgL7kEmAg= github.com/Azure/go-autorest/autorest/adal v0.4.0/go.mod h1:n1iM1u+ZMYni6IIY+FB82IT/DbSdC3T3gO1qFQ/3jEw= +github.com/Azure/go-autorest/autorest/adal v0.5.0 h1:q2gDruN08/guU9vAjuPWff0+QIrpH6ediguzdAzXAUU= +github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= github.com/Azure/go-autorest/autorest/azure/auth v0.1.0 h1:YgO/vSnJEc76NLw2ecIXvXa8bDWiqf1pOJzARAoZsYU= github.com/Azure/go-autorest/autorest/azure/auth v0.1.0/go.mod h1:Gf7/i2FUpyb/sGBLIFxTBzrNzBo7aPXXE3ZVeDRwdpM= github.com/Azure/go-autorest/autorest/azure/cli v0.1.0 h1:YTtBrcb6mhA+PoSW8WxFDoIIyjp13XqJeX80ssQtri4= @@ -36,6 +40,8 @@ github.com/Azure/go-autorest/tracing v0.1.0 h1:TRBxC5Pj/fIuh4Qob0ZpkggbfT8RC0Sub github.com/Azure/go-autorest/tracing v0.1.0/go.mod h1:ROEEAFwXycQw7Sn3DXNtEedEvdeRAgDr0izn4z5Ij88= github.com/Azure/go-autorest/tracing v0.4.0 h1:r5D+n0u8XGFQIpIYk0xlKLDgiArnnIsfUjWANW5Wto0= github.com/Azure/go-autorest/tracing v0.4.0/go.mod h1:sVZ/n8H0f4naUjHNvSe2qjNiC3oV6+8CCqU9mhEvav8= +github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k= +github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod 
h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DataDog/zstd v1.3.5 h1:DtpNbljikUepEPD16hD4LvIcmhnhdLTiW/5pHgbmp14= From 95f412a5be104615424d3f81bbe7ff8e8620e7aa Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 19 Aug 2019 07:54:07 -0700 Subject: [PATCH 105/324] build(deps): bump google.golang.org/api from 0.7.0 to 0.8.0 (#3154) Bumps [google.golang.org/api](https://github.com/google/google-api-go-client) from 0.7.0 to 0.8.0. - [Release notes](https://github.com/google/google-api-go-client/releases) - [Changelog](https://github.com/googleapis/google-api-go-client/blob/master/CHANGES.md) - [Commits](https://github.com/google/google-api-go-client/compare/v0.7.0...v0.8.0) Signed-off-by: dependabot-preview[bot] --- go.mod | 2 +- go.sum | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index d034595f455..9253e79c5ea 100644 --- a/go.mod +++ b/go.mod @@ -59,7 +59,7 @@ require ( golang.org/x/net v0.0.0-20190628185345-da137c7871d7 // indirect golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 // indirect - google.golang.org/api v0.7.0 + google.golang.org/api v0.8.0 google.golang.org/genproto v0.0.0-20190701230453-710ae3a149df // indirect google.golang.org/grpc v1.22.0 gopkg.in/DataDog/dd-trace-go.v1 v1.16.1 diff --git a/go.sum b/go.sum index 8d8df10f8ab..b69d5a4b295 100644 --- a/go.sum +++ b/go.sum @@ -441,6 +441,8 @@ google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMt google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0 h1:9sdfJOzWlkqPltHAuzT2Cp+yrBeY1KRVYgms8soxMwM= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0 h1:VGGbLNyPF7dvYHhcUGYBBGCRDDK0RRJAI6KCvo0CL+E= +google.golang.org/api v0.8.0/go.mod 
h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= From 66a9c7b82a6a98b710600c03f2bf800c178f78a7 Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 19 Aug 2019 07:54:18 -0700 Subject: [PATCH 106/324] build(deps): bump google.golang.org/grpc from 1.22.0 to 1.23.0 (#3155) Bumps [google.golang.org/grpc](https://github.com/grpc/grpc-go) from 1.22.0 to 1.23.0. - [Release notes](https://github.com/grpc/grpc-go/releases) - [Commits](https://github.com/grpc/grpc-go/compare/v1.22.0...v1.23.0) Signed-off-by: dependabot-preview[bot] --- go.mod | 2 +- go.sum | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index 9253e79c5ea..6cb5da9291f 100644 --- a/go.mod +++ b/go.mod @@ -61,7 +61,7 @@ require ( golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 // indirect google.golang.org/api v0.8.0 google.golang.org/genproto v0.0.0-20190701230453-710ae3a149df // indirect - google.golang.org/grpc v1.22.0 + google.golang.org/grpc v1.23.0 gopkg.in/DataDog/dd-trace-go.v1 v1.16.1 gopkg.in/inf.v0 v0.9.1 // indirect k8s.io/api v0.0.0-20190313235455-40a48860b5ab diff --git a/go.sum b/go.sum index b69d5a4b295..6d3a1cf4101 100644 --- a/go.sum +++ b/go.sum @@ -470,6 +470,8 @@ google.golang.org/grpc v1.21.1 h1:j6XxA85m/6txkUCHvzlV5f+HBNl/1r5cZ2A/3IEFOO8= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.22.0 h1:J0UbZOIrCAl+fpTOf8YLs4dJo8L/owV4LYVtAXQoPkw= google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.0 h1:AzbTB6ux+okLTzP8Ru1Xs41C303zdcfEht7MQnYJt5A= 
+google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= gopkg.in/DataDog/dd-trace-go.v1 v1.15.0 h1:2LhklnAJsRSelbnBrrE5QuRleRDkmOh2JWxOtIX6yec= gopkg.in/DataDog/dd-trace-go.v1 v1.15.0/go.mod h1:DVp8HmDh8PuTu2Z0fVVlBsyWaC++fzwVCaGWylTe3tg= gopkg.in/DataDog/dd-trace-go.v1 v1.16.0 h1:M+pIoj6uFByW3E8KaAaBmsLl3Sma3JzXZ9eo7ffyX5A= From 9f950d8bacd75cd8b2f9cbdfbbd721a614a97012 Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 19 Aug 2019 08:25:10 -0700 Subject: [PATCH 107/324] build(deps): bump github.com/Azure/go-autorest/autorest/azure/auth (#3150) Bumps [github.com/Azure/go-autorest/autorest/azure/auth](https://github.com/Azure/go-autorest) from 0.1.0 to 0.3.0. - [Release notes](https://github.com/Azure/go-autorest/releases) - [Changelog](https://github.com/Azure/go-autorest/blob/master/CHANGELOG.md) - [Commits](https://github.com/Azure/go-autorest/compare/logger/v0.1.0...tracing/v0.3.0) Signed-off-by: dependabot-preview[bot] --- go.mod | 2 +- go.sum | 9 +++++++++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index 6cb5da9291f..70a03876632 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,7 @@ require ( contrib.go.opencensus.io/exporter/ocagent v0.4.12 // indirect github.com/Azure/azure-sdk-for-go v31.1.0+incompatible github.com/Azure/go-autorest/autorest v0.9.0 - github.com/Azure/go-autorest/autorest/azure/auth v0.1.0 + github.com/Azure/go-autorest/autorest/azure/auth v0.3.0 github.com/Azure/go-autorest/autorest/to v0.2.0 // indirect github.com/Shopify/sarama v1.21.0 // indirect github.com/aws/aws-sdk-go v1.22.3 diff --git a/go.sum b/go.sum index 6d3a1cf4101..46abd9715ba 100644 --- a/go.sum +++ b/go.sum @@ -24,14 +24,23 @@ github.com/Azure/go-autorest/autorest/adal v0.4.0 h1:e4yveyEm5WvJDtmxJCY8fW1d3MY 
github.com/Azure/go-autorest/autorest/adal v0.4.0/go.mod h1:n1iM1u+ZMYni6IIY+FB82IT/DbSdC3T3gO1qFQ/3jEw= github.com/Azure/go-autorest/autorest/adal v0.5.0 h1:q2gDruN08/guU9vAjuPWff0+QIrpH6ediguzdAzXAUU= github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= +github.com/Azure/go-autorest/autorest/adal v0.6.0 h1:UCTq22yE3RPgbU/8u4scfnnzuCW6pwQ9n+uBtV78ouo= +github.com/Azure/go-autorest/autorest/adal v0.6.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= github.com/Azure/go-autorest/autorest/azure/auth v0.1.0 h1:YgO/vSnJEc76NLw2ecIXvXa8bDWiqf1pOJzARAoZsYU= github.com/Azure/go-autorest/autorest/azure/auth v0.1.0/go.mod h1:Gf7/i2FUpyb/sGBLIFxTBzrNzBo7aPXXE3ZVeDRwdpM= +github.com/Azure/go-autorest/autorest/azure/auth v0.3.0 h1:JwftqZDtWkr3qt1kcEgPd7H57uCHsXKXf66agWUQcGw= +github.com/Azure/go-autorest/autorest/azure/auth v0.3.0/go.mod h1:CI4BQYBct8NS7BXNBBX+RchsFsUu5+oz+OSyR/ZIi7U= github.com/Azure/go-autorest/autorest/azure/cli v0.1.0 h1:YTtBrcb6mhA+PoSW8WxFDoIIyjp13XqJeX80ssQtri4= github.com/Azure/go-autorest/autorest/azure/cli v0.1.0/go.mod h1:Dk8CUAt/b/PzkfeRsWzVG9Yj3ps8mS8ECztu43rdU8U= +github.com/Azure/go-autorest/autorest/azure/cli v0.3.0 h1:5PAqnv+CSTwW9mlZWZAizmzrazFWEgZykEZXpr2hDtY= +github.com/Azure/go-autorest/autorest/azure/cli v0.3.0/go.mod h1:rNYMNAefZMRowqCV0cVhr/YDW5dD7afFq9nXAXL4ykE= github.com/Azure/go-autorest/autorest/date v0.1.0 h1:YGrhWfrgtFs84+h0o46rJrlmsZtyZRg470CqAXTZaGM= github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/date v0.2.0 h1:yW+Zlqf26583pE43KhfnhFcdmSWlm5Ew6bxipnr/tbM= +github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod 
h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= github.com/Azure/go-autorest/autorest/to v0.2.0 h1:nQOZzFCudTh+TvquAtCRjM01VEYx85e9qbwt5ncW4L8= github.com/Azure/go-autorest/autorest/to v0.2.0/go.mod h1:GunWKJp1AEqgMaGLV+iocmRAJWqST1wQYhyyjXJ3SJc= github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY= From 960474aab95f21244e10b1b8ac9873e4f18db525 Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 19 Aug 2019 08:58:16 -0700 Subject: [PATCH 108/324] build(deps): bump github.com/aws/aws-sdk-go from 1.22.3 to 1.23.3 (#3151) Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.22.3 to 1.23.3. 
- [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Changelog](https://github.com/aws/aws-sdk-go/blob/master/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.22.3...v1.23.3) Signed-off-by: dependabot-preview[bot] --- go.mod | 2 +- go.sum | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index 70a03876632..221f16a721a 100644 --- a/go.mod +++ b/go.mod @@ -10,7 +10,7 @@ require ( github.com/Azure/go-autorest/autorest/azure/auth v0.3.0 github.com/Azure/go-autorest/autorest/to v0.2.0 // indirect github.com/Shopify/sarama v1.21.0 // indirect - github.com/aws/aws-sdk-go v1.22.3 + github.com/aws/aws-sdk-go v1.23.3 github.com/caddyserver/caddy v1.0.3 github.com/coredns/federation v0.0.0-20190818181423-e032b096babe github.com/coreos/bbolt v1.3.2 // indirect diff --git a/go.sum b/go.sum index 46abd9715ba..cf1a1e9e400 100644 --- a/go.sum +++ b/go.sum @@ -78,6 +78,8 @@ github.com/aws/aws-sdk-go v1.21.2 h1:CqbWrQzi7s8J2F0TRRdLvTr0+bt5Zxo2IDoFNGsAiUg github.com/aws/aws-sdk-go v1.21.2/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.22.3 h1:6hh+6uqguPNPLtnb6rYLJnonwixttKMbvZYWVUdms98= github.com/aws/aws-sdk-go v1.22.3/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.23.3 h1:Ty/4P6tOFJkDnKDrFJWnveznvESblf8QOheD1CwQPDU= +github.com/aws/aws-sdk-go v1.23.3/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= From 297299e308f23ecdda75bd729da02caee2846f78 Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" 
<27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 19 Aug 2019 08:58:34 -0700 Subject: [PATCH 109/324] build(deps): bump github.com/miekg/dns from 1.1.15 to 1.1.16 (#3153) Bumps [github.com/miekg/dns](https://github.com/miekg/dns) from 1.1.15 to 1.1.16. - [Release notes](https://github.com/miekg/dns/releases) - [Changelog](https://github.com/miekg/dns/blob/master/Makefile.release) - [Commits](https://github.com/miekg/dns/compare/v1.1.15...v1.1.16) Signed-off-by: dependabot-preview[bot] --- go.mod | 2 +- go.sum | 5 +++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index 221f16a721a..743e3d0b306 100644 --- a/go.mod +++ b/go.mod @@ -33,7 +33,7 @@ require ( github.com/jonboulle/clockwork v0.1.0 // indirect github.com/konsorten/go-windows-terminal-sequences v1.0.2 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.1 - github.com/miekg/dns v1.1.15 + github.com/miekg/dns v1.1.16 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.1 // indirect github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492 // indirect diff --git a/go.sum b/go.sum index cf1a1e9e400..a923ad77c2c 100644 --- a/go.sum +++ b/go.sum @@ -247,6 +247,8 @@ github.com/mholt/certmagic v0.6.2-0.20190624175158-6a42ef9fe8c2 h1:xKE9kZ5C8gelJ github.com/mholt/certmagic v0.6.2-0.20190624175158-6a42ef9fe8c2/go.mod h1:g4cOPxcjV0oFq3qwpjSA30LReKD8AoIfwAY9VvG35NY= github.com/miekg/dns v1.1.15 h1:CSSIDtllwGLMoA6zjdKnaE6Tx6eVUxQ29LUgGetiDCI= github.com/miekg/dns v1.1.15/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/miekg/dns v1.1.16 h1:iMEQ/IVHxPTtx2Q07JP/k4CKRvSjiAZjZ0hnhgYEDmE= +github.com/miekg/dns v1.1.16/go.mod h1:YNV562EiewvSmpCB6/W4c6yqjK7Z+M/aIS1JHsIVeg8= 
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -349,6 +351,7 @@ go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/ go.uber.org/zap v1.9.1 h1:XCJQEf3W6eZaVwhRBof6ImoYGJSITeKWsyeh3HFu/5o= go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181001203147-e3636079e1a4/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190123085648-057139ce5d2b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190228161510-8dd112bcdc25/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -371,6 +374,7 @@ golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180926154720-4dfa2610cdf3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -402,6 +406,7 @@ 
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180928133829-e4b3c5e90611/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= From eae97e84ae69ff5550e43a88021baf96a88bc0e1 Mon Sep 17 00:00:00 2001 From: Yong Tang Date: Mon, 19 Aug 2019 11:06:51 -0700 Subject: [PATCH 110/324] Update replace statement in go.mod: (#3157) ``` replace github.com/miekg/dns v1.1.3 => github.com/miekg/dns v1.1.16 ``` so that we only end up with one version of github.com/miekg/dns (caddy still use 1.1.3) Signed-off-by: Yong Tang --- go.mod | 3 ++- go.sum | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index 743e3d0b306..c25e0327360 100644 --- a/go.mod +++ b/go.mod @@ -10,6 +10,7 @@ require ( github.com/Azure/go-autorest/autorest/azure/auth v0.3.0 github.com/Azure/go-autorest/autorest/to v0.2.0 // indirect github.com/Shopify/sarama v1.21.0 // indirect + github.com/apache/thrift v0.12.0 // indirect github.com/aws/aws-sdk-go v1.23.3 github.com/caddyserver/caddy v1.0.3 github.com/coredns/federation v0.0.0-20190818181423-e032b096babe @@ -73,4 +74,4 @@ require ( sigs.k8s.io/yaml v1.1.0 // indirect ) -replace github.com/miekg/dns v1.1.3 => github.com/miekg/dns v1.1.15 +replace 
github.com/miekg/dns v1.1.3 => github.com/miekg/dns v1.1.16 diff --git a/go.sum b/go.sum index a923ad77c2c..ab975902d2e 100644 --- a/go.sum +++ b/go.sum @@ -178,6 +178,7 @@ github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OI github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gnostic v0.2.0 h1:l6N3VoaVzTncYYW+9yOz2LJJammFZGBO13sqgEhpy9g= github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= From 25632d6a23c7d3449537a3cf8d507c61173b1972 Mon Sep 17 00:00:00 2001 From: AllenZMC Date: Tue, 20 Aug 2019 21:00:19 +0800 Subject: [PATCH 111/324] fix mis-spelling in owners_generate.go (#3159) --- owners_generate.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/owners_generate.go b/owners_generate.go index 90eebfc2373..10160febdac 100644 --- a/owners_generate.go +++ b/owners_generate.go @@ -70,7 +70,7 @@ var Owners = []string{` return } -// owners parses a owner file without knowning a whole lot about its structure. +// owners parses a owner file without knowing a whole lot about its structure. 
func owners(path string, owners map[string]struct{}) (map[string]struct{}, error) { file, err := ioutil.ReadFile(path) if err != nil { From 27f44f60eed0797d19be6d99ccaae067bc41c337 Mon Sep 17 00:00:00 2001 From: AllenZMC Date: Wed, 21 Aug 2019 21:16:00 +0800 Subject: [PATCH 112/324] fix mis-spelling in clouddns.go (#3166) --- plugin/clouddns/clouddns.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugin/clouddns/clouddns.go b/plugin/clouddns/clouddns.go index ab04e5f75fd..3546a4fea3e 100644 --- a/plugin/clouddns/clouddns.go +++ b/plugin/clouddns/clouddns.go @@ -107,7 +107,7 @@ func (h *CloudDNS) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Ms return plugin.NextOrFailure(h.Name(), h.Next, ctx, w, r) } - z, ok := h.zones[zName] // ok true if we are authoritive for the zone + z, ok := h.zones[zName] // ok true if we are authoritative for the zone if !ok || z == nil { return dns.RcodeServerFailure, nil } From f888c5f7f62e8f789fe4939faaa9e3b3a3f7c554 Mon Sep 17 00:00:00 2001 From: Julien Garcia Gonzalez Date: Wed, 21 Aug 2019 15:34:21 +0200 Subject: [PATCH 113/324] Validate zone during normalization (#3165) --- core/dnsserver/address.go | 9 ++++++++- core/dnsserver/address_test.go | 1 + 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/core/dnsserver/address.go b/core/dnsserver/address.go index 1a69c33b85e..9146ef650a4 100644 --- a/core/dnsserver/address.go +++ b/core/dnsserver/address.go @@ -3,6 +3,7 @@ package dnsserver import ( "fmt" "net" + "net/url" "strings" "github.com/coredns/coredns/plugin" @@ -52,7 +53,13 @@ func normalizeZone(str string) (zoneAddr, error) { } } - return zoneAddr{Zone: dns.Fqdn(host), Port: port, Transport: trans, IPNet: ipnet}, nil + z := zoneAddr{Zone: dns.Fqdn(host), Port: port, Transport: trans, IPNet: ipnet} + _, err = url.ParseRequestURI(z.String()) + if err != nil { + return zoneAddr{}, err + } + + return z, nil } // SplitProtocolHostPort splits a full formed address like 
"dns://[::1]:53" into parts. diff --git a/core/dnsserver/address_test.go b/core/dnsserver/address_test.go index 6d4d0beab23..86360d8f0ec 100644 --- a/core/dnsserver/address_test.go +++ b/core/dnsserver/address_test.go @@ -28,6 +28,7 @@ func TestNormalizeZone(t *testing.T) { {"https://.:8443", "https://.:8443", false}, {"https://..", "://:", true}, {"https://.:", "://:", true}, + {"dns://.:1053{.:53", "://:", true}, } { addr, err := normalizeZone(test.input) actual := addr.String() From 1b3fb834daec02aedbb7624622edc34c1420c850 Mon Sep 17 00:00:00 2001 From: Yong Tang Date: Wed, 21 Aug 2019 08:01:45 -0700 Subject: [PATCH 114/324] Revert "Validate zone during normalization (#3165)" (#3167) This reverts commit f888c5f7f62e8f789fe4939faaa9e3b3a3f7c554. --- core/dnsserver/address.go | 9 +-------- core/dnsserver/address_test.go | 1 - 2 files changed, 1 insertion(+), 9 deletions(-) diff --git a/core/dnsserver/address.go b/core/dnsserver/address.go index 9146ef650a4..1a69c33b85e 100644 --- a/core/dnsserver/address.go +++ b/core/dnsserver/address.go @@ -3,7 +3,6 @@ package dnsserver import ( "fmt" "net" - "net/url" "strings" "github.com/coredns/coredns/plugin" @@ -53,13 +52,7 @@ func normalizeZone(str string) (zoneAddr, error) { } } - z := zoneAddr{Zone: dns.Fqdn(host), Port: port, Transport: trans, IPNet: ipnet} - _, err = url.ParseRequestURI(z.String()) - if err != nil { - return zoneAddr{}, err - } - - return z, nil + return zoneAddr{Zone: dns.Fqdn(host), Port: port, Transport: trans, IPNet: ipnet}, nil } // SplitProtocolHostPort splits a full formed address like "dns://[::1]:53" into parts. 
diff --git a/core/dnsserver/address_test.go b/core/dnsserver/address_test.go index 86360d8f0ec..6d4d0beab23 100644 --- a/core/dnsserver/address_test.go +++ b/core/dnsserver/address_test.go @@ -28,7 +28,6 @@ func TestNormalizeZone(t *testing.T) { {"https://.:8443", "https://.:8443", false}, {"https://..", "://:", true}, {"https://.:", "://:", true}, - {"dns://.:1053{.:53", "://:", true}, } { addr, err := normalizeZone(test.input) actual := addr.String() From 6881d6d5854a718cd5c754f496a53f9931d6e34e Mon Sep 17 00:00:00 2001 From: Sakura <40171666+928234269@users.noreply.github.com> Date: Thu, 22 Aug 2019 00:15:11 +0800 Subject: [PATCH 115/324] fix typo in coredns-006.md (#3168) Signed-off-by: Sakura --- notes/coredns-006.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/notes/coredns-006.md b/notes/coredns-006.md index be0b1289c23..981949f0ff5 100644 --- a/notes/coredns-006.md +++ b/notes/coredns-006.md @@ -29,7 +29,7 @@ Fixed: ### New -* *reverse* plugin: allows CoreDNS to respond dynamicly to an PTR request and the related +* *reverse* plugin: allows CoreDNS to respond dynamically to an PTR request and the related A/AAAA request. 
### Improvements/changes From 3f47fc8ba4f43a074c078c71fccb8a0463d31672 Mon Sep 17 00:00:00 2001 From: Chris O'Haver Date: Wed, 21 Aug 2019 16:08:55 -0400 Subject: [PATCH 116/324] typo fixes (#3169) * spelling fixes * its/it's --- Makefile.fuzz | 2 +- core/dnsserver/server.go | 2 +- notes/coredns-003.md | 2 +- notes/coredns-1.2.6.md | 2 +- notes/coredns-1.3.1.md | 2 +- notes/coredns-1.6.2.md | 2 +- plugin/auto/zone.go | 4 ++-- plugin/autopath/autopath.go | 2 +- plugin/azure/azure.go | 2 +- plugin/chaos/README.md | 2 +- plugin/file/ent_test.go | 1 - plugin/file/lookup.go | 4 ++-- plugin/health/README.md | 2 +- plugin/kubernetes/external.go | 4 ++-- plugin/kubernetes/kubernetes.go | 2 +- plugin/kubernetes/object/endpoint.go | 2 +- plugin/kubernetes/setup.go | 2 +- plugin/metadata/README.md | 2 +- plugin/normalize.go | 6 +++--- plugin/pkg/response/typify.go | 2 +- plugin/pkg/response/typify_test.go | 2 +- plugin/plugin.go | 2 +- plugin/pprof/pprof.go | 2 +- plugin/ready/README.md | 2 +- plugin/reload/README.md | 2 +- plugin/template/template_test.go | 2 +- test/ds_file_test.go | 2 +- test/secondary_test.go | 2 +- 28 files changed, 32 insertions(+), 33 deletions(-) diff --git a/Makefile.fuzz b/Makefile.fuzz index 4d518972002..89b3c6f02b1 100644 --- a/Makefile.fuzz +++ b/Makefile.fuzz @@ -9,7 +9,7 @@ # make -f Makefile.fuzz forward # # Each plugin that wants to join the fuzzing fray only needs to add a fuzz.go that calls -# the plugins's ServeDNS and used the plugin/pkg/fuzz for the Do function. +# the plugin's ServeDNS and used the plugin/pkg/fuzz for the Do function. # # Installing go-fuzz is very tricky because it does not support Go modules, see the `Makefile` # for the current trickery. 
The following may do the trick: diff --git a/core/dnsserver/server.go b/core/dnsserver/server.go index 4a143f9b3e4..2f0454e48c0 100644 --- a/core/dnsserver/server.go +++ b/core/dnsserver/server.go @@ -259,7 +259,7 @@ func (s *Server) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) // The type is DS, keep the handler, but keep on searching as maybe we are serving // the parent as well and the DS should be routed to it - this will probably *misroute* DS // queries to a possibly grand parent, but there is no way for us to know at this point - // if there is an actually delegation from grandparent -> parent -> zone. + // if there is an actual delegation from grandparent -> parent -> zone. // In all fairness: direct DS queries should not be needed. dshandler = h } diff --git a/notes/coredns-003.md b/notes/coredns-003.md index c1656e7a1c0..ce43b6842cb 100644 --- a/notes/coredns-003.md +++ b/notes/coredns-003.md @@ -28,7 +28,7 @@ Refused queries are properly logged and exported if metrics are enabled. * *auto*: numerous bugfixes. * *file*: fix data race in reload process and also reload a zone when it is `mv`ed (newly created) into place. Also rewrite the zone lookup algorithm and be more standards compliant, esp. in the area of DNSSEC, wildcards and empty-non-terminals; handle secure delegations. -* *kubernetes*: vender the k8s dependency and updates to be compatible with Kubernetes 1.4 and 1.5. +* *kubernetes*: vendor the k8s dependency and updates to be compatible with Kubernetes 1.4 and 1.5. Multiple cleanups and fixes. Kubernetes services can now be resolved. # Contributors diff --git a/notes/coredns-1.2.6.md b/notes/coredns-1.2.6.md index f8fbdeec978..1e383013784 100644 --- a/notes/coredns-1.2.6.md +++ b/notes/coredns-1.2.6.md @@ -27,7 +27,7 @@ kernels. * [*cache*](/plugins/cache) got some minor optimizations. -* [*errors*](/plugins/errors) (and *log*) gotten a new option (`consolidate`) to suppress loging. 
+* [*errors*](/plugins/errors) (and *log*) gotten a new option (`consolidate`) to suppress logging. * [*hosts*](/plugins/hosts) will now read the `hosts` file without holding a write lock. diff --git a/notes/coredns-1.3.1.md b/notes/coredns-1.3.1.md index e129bdfdfc7..acab8bd949a 100644 --- a/notes/coredns-1.3.1.md +++ b/notes/coredns-1.3.1.md @@ -26,7 +26,7 @@ Mostly documentation updates in various plugins. Plus a small fix where we stop * [*log*](/plugins/log) now allows multiple names to be specified. - * [*import*](/plugins/import) was added to give it a README.md to make it's documentation more + * [*import*](/plugins/import) was added to give it a README.md to make its documentation more discoverable. * [*kubernetes*](/plugins/kubernetes) `TTL` is also applied to negative responses (NXDOMAIN, etc). diff --git a/notes/coredns-1.6.2.md b/notes/coredns-1.6.2.md index 7d1d8ed5609..f942de7b3fe 100644 --- a/notes/coredns-1.6.2.md +++ b/notes/coredns-1.6.2.md @@ -12,7 +12,7 @@ The CoreDNS team has released This is a bug fix release, but it also features a new plugin called [*azure*](/plugins/azure). -It's compiled with Go 1.12.8 that incorperates fixes for HTTP/2 that may impact you if you use +It's compiled with Go 1.12.8 that incorporates fixes for HTTP/2 that may impact you if you use [DoH](https://tools.ietf.org/html/rfc8484). # Plugins diff --git a/plugin/auto/zone.go b/plugin/auto/zone.go index c139e724198..0a12ca39f98 100644 --- a/plugin/auto/zone.go +++ b/plugin/auto/zone.go @@ -7,7 +7,7 @@ import ( "github.com/coredns/coredns/plugin/file" ) -// Zones maps zone names to a *Zone. This keep track of what we zones we have loaded at +// Zones maps zone names to a *Zone. This keeps track of what zones we have loaded at // any one time. type Zones struct { Z map[string]*file.Zone // A map mapping zone (origin) to the Zone's data. @@ -56,7 +56,7 @@ func (z *Zones) Add(zo *file.Zone, name string) { z.Unlock() } -// Remove removes the zone named name from z. 
It also stop the zone's reload goroutine. +// Remove removes the zone named name from z. It also stops the zone's reload goroutine. func (z *Zones) Remove(name string) { z.Lock() diff --git a/plugin/autopath/autopath.go b/plugin/autopath/autopath.go index 73882cdbd01..c2f541644f7 100644 --- a/plugin/autopath/autopath.go +++ b/plugin/autopath/autopath.go @@ -3,7 +3,7 @@ Package autopath implements autopathing. This is a hack; it shortcuts the client's search path resolution by performing these lookups on the server... The server has a copy (via AutoPathFunc) of the client's search path and on -receiving a query it first establish if the suffix matches the FIRST configured +receiving a query it first establishes if the suffix matches the FIRST configured element. If no match can be found the query will be forwarded up the plugin chain without interference (iff 'fallthrough' has been set). diff --git a/plugin/azure/azure.go b/plugin/azure/azure.go index 38d9eecfabc..8432af31699 100644 --- a/plugin/azure/azure.go +++ b/plugin/azure/azure.go @@ -206,7 +206,7 @@ func (h *Azure) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) return plugin.NextOrFailure(h.Name(), h.Next, ctx, w, r) } - zones, ok := h.zones[zone] // ok true if we are authoritive for the zone. + zones, ok := h.zones[zone] // ok true if we are authoritative for the zone. if !ok || zones == nil { return dns.RcodeServerFailure, nil } diff --git a/plugin/chaos/README.md b/plugin/chaos/README.md index 3cd6dff41dc..9ce5216ae68 100644 --- a/plugin/chaos/README.md +++ b/plugin/chaos/README.md @@ -7,7 +7,7 @@ ## Description This is useful for retrieving version or author information from the server by querying a TXT record -for a special domainname in the CH class. +for a special domain name in the CH class. 
## Syntax diff --git a/plugin/file/ent_test.go b/plugin/file/ent_test.go index 355cc65c098..73f50858397 100644 --- a/plugin/file/ent_test.go +++ b/plugin/file/ent_test.go @@ -55,7 +55,6 @@ func TestLookupEnt(t *testing.T) { } } -// fdjfdjkf const dbMiekENTNL = `; File written on Sat Apr 2 16:43:11 2016 ; dnssec_signzone version 9.10.3-P4-Ubuntu miek.nl. 1800 IN SOA linode.atoom.net. miek.miek.nl. ( diff --git a/plugin/file/lookup.go b/plugin/file/lookup.go index 9c2c1959ba2..15ea096bbe5 100644 --- a/plugin/file/lookup.go +++ b/plugin/file/lookup.go @@ -69,7 +69,7 @@ func (z *Zone) Lookup(ctx context.Context, state request.Request, qname string) // // Main for-loop handles delegation and finding or not finding the qname. // If found we check if it is a CNAME/DNAME and do CNAME processing - // We also check if we have type and do a nodata resposne. + // We also check if we have type and do a nodata response. // // If not found, we check the potential wildcard, and use that for further processing. // If not found and no wildcard we will process this as an NXDOMAIN response. @@ -169,7 +169,7 @@ func (z *Zone) Lookup(ctx context.Context, state request.Request, qname string) return nil, ret, nil, NoData } - // Additional section processing for MX, SRV. Check response and see if any of the names are in baliwick - + // Additional section processing for MX, SRV. Check response and see if any of the names are in bailiwick - // if so add IP addresses to the additional section. additional := z.additionalProcessing(rrs, do) diff --git a/plugin/health/README.md b/plugin/health/README.md index eb86635ad10..e6fc56e80e0 100644 --- a/plugin/health/README.md +++ b/plugin/health/README.md @@ -44,7 +44,7 @@ net { } ~~~ -Doing this is supported but both endponts ":8080" and ":8081" will export the exact same health. +Doing this is supported but both endpoints ":8080" and ":8081" will export the exact same health. 
## Metrics diff --git a/plugin/kubernetes/external.go b/plugin/kubernetes/external.go index 91a8a2ed159..4b110446070 100644 --- a/plugin/kubernetes/external.go +++ b/plugin/kubernetes/external.go @@ -21,7 +21,7 @@ func (k *Kubernetes) External(state request.Request) ([]msg.Service, int) { if last < 0 { return nil, dns.RcodeServerFailure } - // We dealing with a fairly normal domain name here, but; we still need to have the service + // We are dealing with a fairly normal domain name here, but we still need to have the service // and the namespace: // service.namespace. // @@ -86,7 +86,7 @@ func (k *Kubernetes) External(state request.Request) ([]msg.Service, int) { // ExternalAddress returns the external service address(es) for the CoreDNS service. func (k *Kubernetes) ExternalAddress(state request.Request) []dns.RR { // This is probably wrong, because of all the fallback behavior of k.nsAddr, i.e. can get - // an address that isn't reacheable from outside the cluster. + // an address that isn't reachable from outside the cluster. rrs := []dns.RR{k.nsAddr()} return rrs } diff --git a/plugin/kubernetes/kubernetes.go b/plugin/kubernetes/kubernetes.go index fa3ef044671..a02c3569866 100644 --- a/plugin/kubernetes/kubernetes.go +++ b/plugin/kubernetes/kubernetes.go @@ -179,7 +179,7 @@ func (k *Kubernetes) getClientConfig() (*rest.Config, error) { } // Connect to API from out of cluster - // Only the first one is used. We will deprecated multiple endpoints later. + // Only the first one is used. We will deprecate multiple endpoints later. 
clusterinfo.Server = k.APIServerList[0] if len(k.APICertAuth) > 0 { diff --git a/plugin/kubernetes/object/endpoint.go b/plugin/kubernetes/object/endpoint.go index bd0afeef3dc..b2c77fc106d 100644 --- a/plugin/kubernetes/object/endpoint.go +++ b/plugin/kubernetes/object/endpoint.go @@ -62,7 +62,7 @@ func ToEndpoints(obj interface{}) interface{} { Addresses: make([]EndpointAddress, len(eps.Addresses)), } if len(eps.Ports) == 0 { - // Add sentinal if there are no ports. + // Add sentinel if there are no ports. sub.Ports = []EndpointPort{{Port: -1}} } else { sub.Ports = make([]EndpointPort, len(eps.Ports)) diff --git a/plugin/kubernetes/setup.go b/plugin/kubernetes/setup.go index 00c69818111..eb33936fa77 100644 --- a/plugin/kubernetes/setup.go +++ b/plugin/kubernetes/setup.go @@ -184,7 +184,7 @@ func ParseStanza(c *caddy.Controller) (*Kubernetes, error) { case "endpoint": args := c.RemainingArgs() if len(args) > 0 { - // Multiple endoints are deprecated but still could be specified, + // Multiple endpoints are deprecated but still could be specified, // only the first one be used, though k8s.APIServerList = args if len(args) > 1 { diff --git a/plugin/metadata/README.md b/plugin/metadata/README.md index c69650927d8..e9ca22584d5 100644 --- a/plugin/metadata/README.md +++ b/plugin/metadata/README.md @@ -8,7 +8,7 @@ By enabling *metadata* any plugin that implements [metadata.Provider interface](https://godoc.org/github.com/coredns/coredns/plugin/metadata#Provider) will be called for -each DNS query, at beginning of the process for that query, in order to add it's own meta data to +each DNS query, at beginning of the process for that query, in order to add its own meta data to context. 
The meta data collected will be available for all plugins, via the Context parameter provided in the diff --git a/plugin/normalize.go b/plugin/normalize.go index 6402bf8d905..1289207fde8 100644 --- a/plugin/normalize.go +++ b/plugin/normalize.go @@ -12,7 +12,7 @@ import ( // See core/dnsserver/address.go - we should unify these two impls. -// Zones respresents a lists of zone names. +// Zones represents a lists of zone names. type Zones []string // Matches checks if qname is a subdomain of any of the zones in z. The match @@ -65,7 +65,7 @@ func (h Host) Normalize() string { s := string(h) _, s = parse.Transport(s) - // The error can be ignore here, because this function is called after the corefile has already been vetted. + // The error can be ignored here, because this function is called after the corefile has already been vetted. host, _, _, _ := SplitHostPort(s) return Name(host).Normalize() } @@ -76,7 +76,7 @@ func (h Host) Normalize() string { func SplitHostPort(s string) (host, port string, ipnet *net.IPNet, err error) { // If there is: :[0-9]+ on the end we assume this is the port. This works for (ascii) domain // names and our reverse syntax, which always needs a /mask *before* the port. - // So from the back, find first colon, and then check if its a number. + // So from the back, find first colon, and then check if it's a number. host = s colon := strings.LastIndex(s, ":") diff --git a/plugin/pkg/response/typify.go b/plugin/pkg/response/typify.go index f4dd91fa4c3..df314d41a44 100644 --- a/plugin/pkg/response/typify.go +++ b/plugin/pkg/response/typify.go @@ -15,7 +15,7 @@ const ( NoError Type = iota // NameError is a NXDOMAIN in header, SOA in auth. NameError - // ServerError is a set of errors we want to cache, for now it containers SERVFAIL and NOTIMPL. + // ServerError is a set of errors we want to cache, for now it contains SERVFAIL and NOTIMPL. ServerError // NoData indicates name found, but not the type: NOERROR in header, SOA in auth. 
NoData diff --git a/plugin/pkg/response/typify_test.go b/plugin/pkg/response/typify_test.go index f22b806fbd6..fca6ba10068 100644 --- a/plugin/pkg/response/typify_test.go +++ b/plugin/pkg/response/typify_test.go @@ -49,7 +49,7 @@ func TestTypifyRRSIG(t *testing.T) { } func TestTypifyImpossible(t *testing.T) { - // create impossible message that denies it's own existence + // create impossible message that denies its own existence m := new(dns.Msg) m.SetQuestion("bar.www.example.org.", dns.TypeAAAA) m.Rcode = dns.RcodeNameError // name does not exist diff --git a/plugin/plugin.go b/plugin/plugin.go index 7995cbba8a8..ee1bf84296b 100644 --- a/plugin/plugin.go +++ b/plugin/plugin.go @@ -84,7 +84,7 @@ func NextOrFailure(name string, next Handler, ctx context.Context, w dns.Respons } // ClientWrite returns true if the response has been written to the client. -// Each plugin to adhire to this protocol. +// Each plugin to adhere to this protocol. func ClientWrite(rcode int) bool { switch rcode { case dns.RcodeServerFailure: diff --git a/plugin/pprof/pprof.go b/plugin/pprof/pprof.go index 8367a307175..c5301008918 100644 --- a/plugin/pprof/pprof.go +++ b/plugin/pprof/pprof.go @@ -1,4 +1,4 @@ -// Package pprof implement a debug endpoint for getting profiles using the +// Package pprof implements a debug endpoint for getting profiles using the // go pprof tooling. package pprof diff --git a/plugin/ready/README.md b/plugin/ready/README.md index d4a9bc5febf..d2e430de7c0 100644 --- a/plugin/ready/README.md +++ b/plugin/ready/README.md @@ -13,7 +13,7 @@ will not be queried again. Each Server Block that enables the *ready* plugin will have the plugins *in that server block* report readiness into the /ready endpoint that runs on the same port. 
This also means that the -*same* plugin with different configurations (in potentialy *different* Server Blocks) will have +*same* plugin with different configurations (in potentially *different* Server Blocks) will have their readiness reported as the union of their respective readinesses. ## Syntax diff --git a/plugin/reload/README.md b/plugin/reload/README.md index 89a49f14ae3..ca80f8602f1 100644 --- a/plugin/reload/README.md +++ b/plugin/reload/README.md @@ -85,7 +85,7 @@ is already listening on that port. The process reloads and performs the followin 4. fail loading the new Corefile, abort and keep using the old process After the aborted attempt to reload we are left with the old processes running, but the listener is -closed in step 1; so the health endpoint is broken. The same can hopen in the prometheus metrics plugin. +closed in step 1; so the health endpoint is broken. The same can happen in the prometheus metrics plugin. In general be careful with assigning new port and expecting reload to work fully. 
diff --git a/plugin/template/template_test.go b/plugin/template/template_test.go index 1aa26229fb2..7e8f988f2cc 100644 --- a/plugin/template/template_test.go +++ b/plugin/template/template_test.go @@ -421,7 +421,7 @@ func TestHandler(t *testing.T) { } } -// TestMultiSection verfies that a corefile with multiple but different template sections works +// TestMultiSection verifies that a corefile with multiple but different template sections works func TestMultiSection(t *testing.T) { ctx := context.TODO() diff --git a/test/ds_file_test.go b/test/ds_file_test.go index 7d4ae0cdc6c..84b383368bc 100644 --- a/test/ds_file_test.go +++ b/test/ds_file_test.go @@ -8,7 +8,7 @@ import ( "github.com/miekg/dns" ) -// Using miek.nl here because this is the easiest zone to get access to and it's masters +// Using miek.nl here because this is the easiest zone to get access to and its masters // run both NSD and BIND9, making checks like "what should we actually return" super easy. var dsTestCases = []mtest.Case{ { diff --git a/test/secondary_test.go b/test/secondary_test.go index c0df1632b1b..fd9d4d5621b 100644 --- a/test/secondary_test.go +++ b/test/secondary_test.go @@ -71,7 +71,7 @@ func TestSecondaryZoneTransfer(t *testing.T) { m.SetQuestion("example.org.", dns.TypeSOA) var r *dns.Msg - // This is now async; we we need to wait for it to be transferred. + // This is now async; we need to wait for it to be transferred. 
for i := 0; i < 10; i++ { r, err = dns.Exchange(m, udp) if len(r.Answer) != 0 { From 434ac69a03579708bd3a7311961a0a18adba61f2 Mon Sep 17 00:00:00 2001 From: AllenZMC Date: Thu, 22 Aug 2019 21:59:12 +0800 Subject: [PATCH 117/324] fix wrong spells in parse_test.go (#3173) --- plugin/kubernetes/parse_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugin/kubernetes/parse_test.go b/plugin/kubernetes/parse_test.go index 0ce4b7d98da..6fc635477e2 100644 --- a/plugin/kubernetes/parse_test.go +++ b/plugin/kubernetes/parse_test.go @@ -37,7 +37,7 @@ func TestParseRequest(t *testing.T) { } rs := r.String() if rs != tc.expected { - t.Errorf("Test %d, expected (stringyfied) recordRequest: %s, got %s", i, tc.expected, rs) + t.Errorf("Test %d, expected (stringified) recordRequest: %s, got %s", i, tc.expected, rs) } } } From 7918190901de84e3aeeda41c80bc30ed40fdf432 Mon Sep 17 00:00:00 2001 From: Guangming Wang Date: Thu, 22 Aug 2019 22:35:55 +0800 Subject: [PATCH 118/324] Cleanup: fix some typos in code comment (#3172) Signed-off-by: Guangming Wang --- plugin/etcd/msg/path.go | 2 +- plugin/forward/forward.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/plugin/etcd/msg/path.go b/plugin/etcd/msg/path.go index c90798035ef..bfa4588632e 100644 --- a/plugin/etcd/msg/path.go +++ b/plugin/etcd/msg/path.go @@ -29,7 +29,7 @@ func Domain(s string) string { return dnsutil.Join(l[1 : len(l)-1]...) } -// PathWithWildcard ascts as Path, but if a name contains wildcards (* or any), the name will be +// PathWithWildcard acts as Path, but if a name contains wildcards (* or any), the name will be // chopped of before the (first) wildcard, and we do a higher level search and // later find the matching names. 
So service.*.skydns.local, will look for all // services under skydns.local and will later check for names that match diff --git a/plugin/forward/forward.go b/plugin/forward/forward.go index da2b2beb49f..4abd80baedb 100644 --- a/plugin/forward/forward.go +++ b/plugin/forward/forward.go @@ -89,7 +89,7 @@ func (f *Forward) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg if fails < len(f.proxies) { continue } - // All upstream proxies are dead, assume healtcheck is completely broken and randomly + // All upstream proxies are dead, assume healthcheck is completely broken and randomly // select an upstream to connect to. r := new(random) proxy = r.List(f.proxies)[0] From f8e0ae63305ac712d8e40dfa72a6af497205f371 Mon Sep 17 00:00:00 2001 From: xieyanker Date: Fri, 23 Aug 2019 00:44:25 +0800 Subject: [PATCH 119/324] fix typo "dnstop" >> "dnstap" (#3170) --- plugin/dnstap/dnstapio/io.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugin/dnstap/dnstapio/io.go b/plugin/dnstap/dnstapio/io.go index 65e2e222e35..9a4c26042ef 100644 --- a/plugin/dnstap/dnstapio/io.go +++ b/plugin/dnstap/dnstapio/io.go @@ -70,7 +70,7 @@ func (dio *dnstapIO) newConnect() error { return dio.enc.resetWriter(dio.conn) } -// Connect connects to the dnstop endpoint. +// Connect connects to the dnstap endpoint. func (dio *dnstapIO) Connect() { if err := dio.newConnect(); err != nil { log.Error("No connection to dnstap endpoint") From 9f49d694e909651ceeb846a72269a28f55545f4f Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Thu, 22 Aug 2019 18:49:22 +0000 Subject: [PATCH 120/324] fuzz: fix rewrite crash by fixing fuzz/do.go (#3178) Automatically submitted. --- plugin/pkg/fuzz/do.go | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/plugin/pkg/fuzz/do.go b/plugin/pkg/fuzz/do.go index e2bc0edee4b..054c4298ab9 100644 --- a/plugin/pkg/fuzz/do.go +++ b/plugin/pkg/fuzz/do.go @@ -13,15 +13,19 @@ import ( // Do will fuzz p - used by gofuzz. 
See Makefile.fuzz for comments and context. func Do(p plugin.Handler, data []byte) int { ctx := context.TODO() - ret := 1 r := new(dns.Msg) if err := r.Unpack(data); err != nil { - ret = 0 + return 0 // plugin will never be called when this happens. + } + // If the data unpack into a dns msg, but does not have a proper question section discard it. + // The server parts make sure this is true before calling the plugins; mimic this behavior. + if len(r.Question) == 0 { + return 0 } if _, err := p.ServeDNS(ctx, &test.ResponseWriter{}, r); err != nil { - ret = 1 + return 1 } - return ret + return 0 } From 1590320ff2c1d3eb98dca2ed624a5c4244dde012 Mon Sep 17 00:00:00 2001 From: wwgfhf <51694849+wwgfhf@users.noreply.github.com> Date: Fri, 23 Aug 2019 14:29:17 +0800 Subject: [PATCH 121/324] fix typo (#3180) --- plugin/reload/reload.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugin/reload/reload.go b/plugin/reload/reload.go index 59e040479b9..0e01f63b4e5 100644 --- a/plugin/reload/reload.go +++ b/plugin/reload/reload.go @@ -99,7 +99,7 @@ func hook(event caddy.EventName, info interface{}) error { // Let not try to restart with the same file, even though it is wrong. 
md5sum = s // now lets consider that plugin will not be reload, unless appear in next config file - // change status iof usage will be reset in setup if the plugin appears in config file + // change status of usage will be reset in setup if the plugin appears in config file r.setUsage(maybeUsed) _, err := instance.Restart(corefile) if err != nil { From 88faf63017769c8a49623b661eca1d6bd3834a5f Mon Sep 17 00:00:00 2001 From: Sakura <40171666+928234269@users.noreply.github.com> Date: Fri, 23 Aug 2019 14:29:39 +0800 Subject: [PATCH 122/324] fix typo in coredns-1.0.1.md (#3179) Signed-off-by: Sakura --- notes/coredns-1.0.1.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/notes/coredns-1.0.1.md b/notes/coredns-1.0.1.md index 216afd46e34..e3cb5589893 100644 --- a/notes/coredns-1.0.1.md +++ b/notes/coredns-1.0.1.md @@ -16,7 +16,7 @@ One new plugin was added: *nsid*, that implements [RFC 5001](https://tools.ietf. ## Plugins * *file* fixes a crash when an request with a DO bit (pretty much the default) hits an unsigned zone. The default configuration should recover the go-routine, but this is nonetheless serious. *file* received some other fixes when returning (secure) delegations. * *dnstap* plugin is now 50% faster. -* *metrics* fixed the start time bucket for duration. +* *metrics* fixed the start time bucket for the duration. ## Contributors From 84988ce2c20a33a6c37e8136aecb023bad3f17ef Mon Sep 17 00:00:00 2001 From: Sakura <40171666+928234269@users.noreply.github.com> Date: Fri, 23 Aug 2019 14:50:54 +0800 Subject: [PATCH 123/324] fix typo in ADOPTERS.md (#3181) Signed-off-by: Sakura --- ADOPTERS.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ADOPTERS.md b/ADOPTERS.md index a67917f54b0..22abc9aefb4 100644 --- a/ADOPTERS.md +++ b/ADOPTERS.md @@ -9,7 +9,7 @@ * [Z Lab](https://zlab.co.jp) uses CoreDNS in production combination with Consul and Kubernetes Clusters. 
* [Serpro/estaleiro](estaleiro.serpro.gov.br) uses CoreDNS as Kubernetes' DNS Server, in production with tuned Kubernetes plugin options * [Lumo](https://thinklumo.com) uses CoreDNS as Kubernetes' DNS Server, in production and lab with default configuration -* [Booming Games](https://booming-games.com) uses CoreDNS in multiple Kubernetes clusters, with Federation plugin. expect going to production soon. +* [Booming Games](https://booming-games.com) uses CoreDNS in multiple Kubernetes clusters, with Federation plugin. expect to go into production soon. * [Sodimac](https://www.sodimac.cl) uses CoreDNS with Kubernetes in production with default configuration. * [Bose](https://www.bose.com/) uses CoreDNS with Kubernetes in production on very large cluster (over 250 nodes) * [farmotive](https://farmotive.io) uses CoreDNS in Kubernetes using default configuration, in its Lab. Expect to be in production soon. From 338d148c7845ca4e3a44253ac8b6f040c394b8c4 Mon Sep 17 00:00:00 2001 From: Chris O'Haver Date: Fri, 23 Aug 2019 12:54:06 -0400 Subject: [PATCH 124/324] plugin/k8s_external/kubernetes: handle NS records (#3160) * fix external ns records * use k8s service name for ns record * update test, add func comment * expand nsAddrs() test cases * support local ipv6 ip * use less confusing pod ip in test --- plugin/kubernetes/external.go | 10 +-- plugin/kubernetes/kubernetes.go | 38 ++++++----- plugin/kubernetes/kubernetes_test.go | 2 +- plugin/kubernetes/local.go | 11 ++-- plugin/kubernetes/ns.go | 91 ++++++++++++++++---------- plugin/kubernetes/ns_test.go | 98 ++++++++++++++++++++++++---- 6 files changed, 180 insertions(+), 70 deletions(-) diff --git a/plugin/kubernetes/external.go b/plugin/kubernetes/external.go index 4b110446070..42495a430b4 100644 --- a/plugin/kubernetes/external.go +++ b/plugin/kubernetes/external.go @@ -85,8 +85,10 @@ func (k *Kubernetes) External(state request.Request) ([]msg.Service, int) { // ExternalAddress returns the external service address(es) 
for the CoreDNS service. func (k *Kubernetes) ExternalAddress(state request.Request) []dns.RR { - // This is probably wrong, because of all the fallback behavior of k.nsAddr, i.e. can get - // an address that isn't reachable from outside the cluster. - rrs := []dns.RR{k.nsAddr()} - return rrs + // If CoreDNS is running inside the Kubernetes cluster: k.nsAddrs() will return the external IPs of the services + // targeting the CoreDNS Pod. + // If CoreDNS is running outside of the Kubernetes cluster: k.nsAddrs() will return the first non-loopback IP + // address seen on the local system it is running on. This could be the wrong answer if coredns is using the *bind* + // plugin to bind to a different IP address. + return k.nsAddrs(true, state.Zone) } diff --git a/plugin/kubernetes/kubernetes.go b/plugin/kubernetes/kubernetes.go index a02c3569866..e7b36917e8d 100644 --- a/plugin/kubernetes/kubernetes.go +++ b/plugin/kubernetes/kubernetes.go @@ -107,25 +107,33 @@ func (k *Kubernetes) Services(ctx context.Context, state request.Request, exact case dns.TypeNS: // We can only get here if the qname equals the zone, see ServeDNS in handler.go. 
- ns := k.nsAddr() - svc := msg.Service{Host: ns.A.String(), Key: msg.Path(state.QName(), coredns), TTL: k.ttl} - return []msg.Service{svc}, nil + nss := k.nsAddrs(false, state.Zone) + var svcs []msg.Service + for _, ns := range nss { + if ns.Header().Rrtype == dns.TypeA { + svcs = append(svcs, msg.Service{Host: ns.(*dns.A).A.String(), Key: msg.Path(ns.Header().Name, coredns), TTL: k.ttl}) + continue + } + if ns.Header().Rrtype == dns.TypeAAAA { + svcs = append(svcs, msg.Service{Host: ns.(*dns.AAAA).AAAA.String(), Key: msg.Path(ns.Header().Name, coredns), TTL: k.ttl}) + } + } + return svcs, nil } if isDefaultNS(state.Name(), state.Zone) { - ns := k.nsAddr() - - isIPv4 := ns.A.To4() != nil - - if !((state.QType() == dns.TypeA && isIPv4) || (state.QType() == dns.TypeAAAA && !isIPv4)) { - // NODATA - return nil, nil + nss := k.nsAddrs(false, state.Zone) + var svcs []msg.Service + for _, ns := range nss { + if ns.Header().Rrtype == dns.TypeA && state.QType() == dns.TypeA { + svcs = append(svcs, msg.Service{Host: ns.(*dns.A).A.String(), Key: msg.Path(state.QName(), coredns), TTL: k.ttl}) + continue + } + if ns.Header().Rrtype == dns.TypeAAAA && state.QType() == dns.TypeAAAA { + svcs = append(svcs, msg.Service{Host: ns.(*dns.AAAA).AAAA.String(), Key: msg.Path(state.QName(), coredns), TTL: k.ttl}) + } } - - // If this is an A request for "ns.dns", respond with a "fake" record for coredns. 
- // SOA records always use this hardcoded name - svc := msg.Service{Host: ns.A.String(), Key: msg.Path(state.QName(), coredns), TTL: k.ttl} - return []msg.Service{svc}, nil + return svcs, nil } s, e := k.Records(ctx, state, false) diff --git a/plugin/kubernetes/kubernetes_test.go b/plugin/kubernetes/kubernetes_test.go index 57b735d7b47..4fd8e22a6ce 100644 --- a/plugin/kubernetes/kubernetes_test.go +++ b/plugin/kubernetes/kubernetes_test.go @@ -327,7 +327,7 @@ func TestServicesAuthority(t *testing.T) { state := request.Request{ Req: &dns.Msg{Question: []dns.Question{{Name: test.qname, Qtype: test.qtype}}}, - Zone: "interwebs.test.", // must match from k.Zones[0] + Zone: k.Zones[0], } svcs, e := k.Services(context.TODO(), state, false, plugin.Options{}) if e != nil { diff --git a/plugin/kubernetes/local.go b/plugin/kubernetes/local.go index c5918a3060e..199af6f0dba 100644 --- a/plugin/kubernetes/local.go +++ b/plugin/kubernetes/local.go @@ -12,11 +12,14 @@ func localPodIP() net.IP { for _, addr := range addrs { ip, _, _ := net.ParseCIDR(addr.String()) - ip = ip.To4() - if ip == nil || ip.IsLoopback() { - continue + ip4 := ip.To4() + if ip4 != nil && !ip4.IsLoopback() { + return ip4 + } + ip6 := ip.To16() + if ip6 != nil && !ip6.IsLoopback() { + return ip6 } - return ip } return nil } diff --git a/plugin/kubernetes/ns.go b/plugin/kubernetes/ns.go index f3d33ee22d2..6d18c0b77f0 100644 --- a/plugin/kubernetes/ns.go +++ b/plugin/kubernetes/ns.go @@ -4,6 +4,7 @@ import ( "net" "strings" + "github.com/coredns/coredns/plugin/kubernetes/object" "github.com/miekg/dns" api "k8s.io/api/core/v1" ) @@ -12,54 +13,74 @@ func isDefaultNS(name, zone string) bool { return strings.Index(name, defaultNSName) == 0 && strings.Index(name, zone) == len(defaultNSName) } -// nsAddr return the A record for the CoreDNS service in the cluster. If it fails that it fallsback -// on the local address of the machine we're running on. -// -// This function is rather expensive to run. 
-func (k *Kubernetes) nsAddr() *dns.A { +// nsAddrs returns the A or AAAA records for the CoreDNS service in the cluster. If the service cannot be found, +// it returns a record for the the local address of the machine we're running on. +func (k *Kubernetes) nsAddrs(external bool, zone string) []dns.RR { var ( - svcName string - svcNamespace string + svcNames []string + svcIPs []net.IP ) - rr := new(dns.A) + // Find the CoreDNS Endpoint localIP := k.interfaceAddrsFunc() - rr.A = localIP + endpoints := k.APIConn.EpIndexReverse(localIP.String()) -FindEndpoint: - for _, ep := range k.APIConn.EpIndexReverse(localIP.String()) { - for _, eps := range ep.Subsets { - for _, addr := range eps.Addresses { - if localIP.Equal(net.ParseIP(addr.IP)) { - svcNamespace = ep.Namespace - svcName = ep.Name - break FindEndpoint + // If the CoreDNS Endpoint is not found, use the locally bound IP address + if len(endpoints) == 0 { + svcNames = []string{defaultNSName + zone} + svcIPs = []net.IP{localIP} + } else { + // Collect IPs for all Services of the Endpoints + for _, endpoint := range endpoints { + svcs := k.APIConn.SvcIndex(object.ServiceKey(endpoint.Name, endpoint.Namespace)) + for _, svc := range svcs { + if external { + svcName := strings.Join([]string{svc.Name, svc.Namespace, zone}, ".") + for _, exIP := range svc.ExternalIPs { + svcNames = append(svcNames, svcName) + svcIPs = append(svcIPs, net.ParseIP(exIP)) + } + continue + } + svcName := strings.Join([]string{svc.Name, svc.Namespace, Svc, zone}, ".") + if svc.ClusterIP == api.ClusterIPNone { + // For a headless service, use the endpoints IPs + for _, s := range endpoint.Subsets { + for _, a := range s.Addresses { + svcNames = append(svcNames, endpointHostname(a, k.endpointNameMode)+"."+svcName) + svcIPs = append(svcIPs, net.ParseIP(a.IP)) + } + } + } else { + svcNames = append(svcNames, svcName) + svcIPs = append(svcIPs, net.ParseIP(svc.ClusterIP)) } } } } - if len(svcName) == 0 { - rr.Hdr.Name = defaultNSName - rr.A = 
localIP - return rr - } - -FindService: - for _, svc := range k.APIConn.ServiceList() { - if svcName == svc.Name && svcNamespace == svc.Namespace { - if svc.ClusterIP == api.ClusterIPNone { - rr.A = localIP - } else { - rr.A = net.ParseIP(svc.ClusterIP) - } - break FindService + // Create an RR slice of collected IPs + var rrs []dns.RR + rrs = make([]dns.RR, len(svcIPs)) + for i, ip := range svcIPs { + if ip.To4() == nil { + rr := new(dns.AAAA) + rr.Hdr.Class = dns.ClassINET + rr.Hdr.Rrtype = dns.TypeAAAA + rr.Hdr.Name = svcNames[i] + rr.AAAA = ip + rrs[i] = rr + continue } + rr := new(dns.A) + rr.Hdr.Class = dns.ClassINET + rr.Hdr.Rrtype = dns.TypeA + rr.Hdr.Name = svcNames[i] + rr.A = ip + rrs[i] = rr } - rr.Hdr.Name = strings.Join([]string{svcName, svcNamespace, "svc."}, ".") - - return rr + return rrs } const defaultNSName = "ns.dns." diff --git a/plugin/kubernetes/ns_test.go b/plugin/kubernetes/ns_test.go index df7cf5f83d5..af057c254db 100644 --- a/plugin/kubernetes/ns_test.go +++ b/plugin/kubernetes/ns_test.go @@ -1,9 +1,11 @@ package kubernetes import ( + "net" "testing" "github.com/coredns/coredns/plugin/kubernetes/object" + "github.com/miekg/dns" api "k8s.io/api/core/v1" ) @@ -14,12 +16,23 @@ func (APIConnTest) HasSynced() bool { return true } func (APIConnTest) Run() { return } func (APIConnTest) Stop() error { return nil } func (APIConnTest) PodIndex(string) []*object.Pod { return nil } -func (APIConnTest) SvcIndex(string) []*object.Service { return nil } func (APIConnTest) SvcIndexReverse(string) []*object.Service { return nil } func (APIConnTest) EpIndex(string) []*object.Endpoints { return nil } func (APIConnTest) EndpointsList() []*object.Endpoints { return nil } func (APIConnTest) Modified() int64 { return 0 } +func (a APIConnTest) SvcIndex(s string) []*object.Service { + switch s { + case "dns-service.kube-system": + return []*object.Service{a.ServiceList()[0]} + case "hdls-dns-service.kube-system": + return 
[]*object.Service{a.ServiceList()[1]} + case "dns6-service.kube-system": + return []*object.Service{a.ServiceList()[2]} + } + return nil +} + func (APIConnTest) ServiceList() []*object.Service { svcs := []*object.Service{ { @@ -27,18 +40,31 @@ func (APIConnTest) ServiceList() []*object.Service { Namespace: "kube-system", ClusterIP: "10.0.0.111", }, + { + Name: "hdls-dns-service", + Namespace: "kube-system", + ClusterIP: api.ClusterIPNone, + }, + { + Name: "dns6-service", + Namespace: "kube-system", + ClusterIP: "10::111", + }, } return svcs } -func (APIConnTest) EpIndexReverse(string) []*object.Endpoints { +func (APIConnTest) EpIndexReverse(ip string) []*object.Endpoints { + if ip != "10.244.0.20" { + return nil + } eps := []*object.Endpoints{ { Subsets: []object.EndpointSubset{ { Addresses: []object.EndpointAddress{ { - IP: "127.0.0.1", + IP: "10.244.0.20", }, }, }, @@ -46,6 +72,32 @@ func (APIConnTest) EpIndexReverse(string) []*object.Endpoints { Name: "dns-service", Namespace: "kube-system", }, + { + Subsets: []object.EndpointSubset{ + { + Addresses: []object.EndpointAddress{ + { + IP: "10.244.0.20", + }, + }, + }, + }, + Name: "hdls-dns-service", + Namespace: "kube-system", + }, + { + Subsets: []object.EndpointSubset{ + { + Addresses: []object.EndpointAddress{ + { + IP: "10.244.0.20", + }, + }, + }, + }, + Name: "dns6-service", + Namespace: "kube-system", + }, } return eps } @@ -55,19 +107,43 @@ func (APIConnTest) GetNamespaceByName(name string) (*api.Namespace, error) { return &api.Namespace{}, nil } -func TestNsAddr(t *testing.T) { +func TestNsAddrs(t *testing.T) { k := New([]string{"inter.webs.test."}) k.APIConn = &APIConnTest{} + k.interfaceAddrsFunc = func() net.IP { return net.ParseIP("10.244.0.20") } - cdr := k.nsAddr() - expected := "10.0.0.111" + cdrs := k.nsAddrs(false, k.Zones[0]) - if cdr.A.String() != expected { - t.Errorf("Expected A to be %q, got %q", expected, cdr.A.String()) + if len(cdrs) != 3 { + t.Fatalf("Expected 3 results, got %v", 
len(cdrs)) + + } + cdr := cdrs[0] + expected := "10.0.0.111" + if cdr.(*dns.A).A.String() != expected { + t.Errorf("Expected 1st A to be %q, got %q", expected, cdr.(*dns.A).A.String()) + } + expected = "dns-service.kube-system.svc.inter.webs.test." + if cdr.Header().Name != expected { + t.Errorf("Expected 1st Header Name to be %q, got %q", expected, cdr.Header().Name) + } + cdr = cdrs[1] + expected = "10.244.0.20" + if cdr.(*dns.A).A.String() != expected { + t.Errorf("Expected 2nd A to be %q, got %q", expected, cdr.(*dns.A).A.String()) + } + expected = "10-244-0-20.hdls-dns-service.kube-system.svc.inter.webs.test." + if cdr.Header().Name != expected { + t.Errorf("Expected 2nd Header Name to be %q, got %q", expected, cdr.Header().Name) + } + cdr = cdrs[2] + expected = "10::111" + if cdr.(*dns.AAAA).AAAA.String() != expected { + t.Errorf("Expected AAAA to be %q, got %q", expected, cdr.(*dns.A).A.String()) } - expected = "dns-service.kube-system.svc." - if cdr.Hdr.Name != expected { - t.Errorf("Expected Hdr.Name to be %q, got %q", expected, cdr.Hdr.Name) + expected = "dns6-service.kube-system.svc.inter.webs.test." 
+ if cdr.Header().Name != expected { + t.Errorf("Expected AAAA Header Name to be %q, got %q", expected, cdr.Header().Name) } } From 59e74eb15d31298f27b5bab7c02241e832de9dad Mon Sep 17 00:00:00 2001 From: Chris O'Haver Date: Fri, 23 Aug 2019 14:51:42 -0400 Subject: [PATCH 125/324] correct test and code (#3184) --- plugin/k8s_external/apex.go | 2 +- plugin/k8s_external/apex_test.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/plugin/k8s_external/apex.go b/plugin/k8s_external/apex.go index f58894817e5..85edbea6cda 100644 --- a/plugin/k8s_external/apex.go +++ b/plugin/k8s_external/apex.go @@ -20,7 +20,7 @@ func (e *External) serveApex(state request.Request) (int, error) { addr := e.externalAddrFunc(state) for _, rr := range addr { rr.Header().Ttl = e.ttl - rr.Header().Name = state.QName() + rr.Header().Name = dnsutil.Join("ns1", e.apex, state.QName()) m.Extra = append(m.Extra, rr) } default: diff --git a/plugin/k8s_external/apex_test.go b/plugin/k8s_external/apex_test.go index 14aaf64bcb5..2f6923f56c2 100644 --- a/plugin/k8s_external/apex_test.go +++ b/plugin/k8s_external/apex_test.go @@ -59,7 +59,7 @@ var testsApex = []test.Case{ test.NS("example.com. 5 IN NS ns1.dns.example.com."), }, Extra: []dns.RR{ - test.A("example.com. 5 IN A 127.0.0.1"), + test.A("ns1.dns.example.com. 5 IN A 127.0.0.1"), }, }, { From 62451fd3eb68cef5dcdafe6e4228f08cf52d84fc Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Sat, 24 Aug 2019 18:13:47 +0000 Subject: [PATCH 126/324] fuzzing: allow setup function to be called (#3175) This allows to fuzzing of more interesting targets that require setup. 
Signed-off-by: Miek Gieben --- plugin/cache/fuzz.go | 2 +- plugin/chaos/fuzz.go | 2 +- plugin/file/fuzz.go | 2 +- plugin/pkg/fuzz/do.go | 7 ++++++- plugin/pkg/fuzz/setup.go | 10 ++++++++++ plugin/rewrite/fuzz.go | 2 +- plugin/whoami/fuzz.go | 2 +- 7 files changed, 21 insertions(+), 6 deletions(-) create mode 100644 plugin/pkg/fuzz/setup.go diff --git a/plugin/cache/fuzz.go b/plugin/cache/fuzz.go index 9bf6cb3a9d0..401e9b166b0 100644 --- a/plugin/cache/fuzz.go +++ b/plugin/cache/fuzz.go @@ -8,5 +8,5 @@ import ( // Fuzz fuzzes cache. func Fuzz(data []byte) int { - return fuzz.Do(New(), data) + return fuzz.Do(New(), nil, data) } diff --git a/plugin/chaos/fuzz.go b/plugin/chaos/fuzz.go index f0e23b0831c..f5a79f775a0 100644 --- a/plugin/chaos/fuzz.go +++ b/plugin/chaos/fuzz.go @@ -9,5 +9,5 @@ import ( // Fuzz fuzzes cache. func Fuzz(data []byte) int { c := Chaos{} - return fuzz.Do(c, data) + return fuzz.Do(c, nil, data) } diff --git a/plugin/file/fuzz.go b/plugin/file/fuzz.go index 84f5c1853a2..535db0e93e5 100644 --- a/plugin/file/fuzz.go +++ b/plugin/file/fuzz.go @@ -15,7 +15,7 @@ func Fuzz(data []byte) int { zone, _ := Parse(strings.NewReader(fuzzMiekNL), name, "stdin", 0) f := File{Next: test.ErrorHandler(), Zones: Zones{Z: map[string]*Zone{name: zone}, Names: []string{name}}} - return fuzz.Do(f, data) + return fuzz.Do(f, nil, data) } const fuzzMiekNL = ` diff --git a/plugin/pkg/fuzz/do.go b/plugin/pkg/fuzz/do.go index 054c4298ab9..95a5400f1d8 100644 --- a/plugin/pkg/fuzz/do.go +++ b/plugin/pkg/fuzz/do.go @@ -11,7 +11,12 @@ import ( ) // Do will fuzz p - used by gofuzz. See Makefile.fuzz for comments and context. 
-func Do(p plugin.Handler, data []byte) int { +func Do(p plugin.Handler, fn SetupFunc, data []byte) int { + if fn != nil { + if err := fn(); err != nil { + panic("fuzz: " + err.Error()) + } + } ctx := context.TODO() r := new(dns.Msg) if err := r.Unpack(data); err != nil { diff --git a/plugin/pkg/fuzz/setup.go b/plugin/pkg/fuzz/setup.go new file mode 100644 index 00000000000..7c44b0ef38f --- /dev/null +++ b/plugin/pkg/fuzz/setup.go @@ -0,0 +1,10 @@ +package fuzz + +// SetupFunc can be given to Do to perform a one time setup of the fuzzing +// environment. This function is called on every fuzz, it is your +// responsibility to make it idempotent. If SetupFunc returns an error, panic +// is called with that error. +// +// There isn't a ShutdownFunc, because fuzzing is supposed to be run for a long +// time and there isn't any hook to call it from. +type SetupFunc func() error diff --git a/plugin/rewrite/fuzz.go b/plugin/rewrite/fuzz.go index 043a4a5c9a7..545ef5b93b4 100644 --- a/plugin/rewrite/fuzz.go +++ b/plugin/rewrite/fuzz.go @@ -17,5 +17,5 @@ func Fuzz(data []byte) int { } r := Rewrite{Rules: rules} - return fuzz.Do(r, data) + return fuzz.Do(r, nil, data) } diff --git a/plugin/whoami/fuzz.go b/plugin/whoami/fuzz.go index d9bbcee2bd6..917a9189f25 100644 --- a/plugin/whoami/fuzz.go +++ b/plugin/whoami/fuzz.go @@ -9,5 +9,5 @@ import ( // Fuzz fuzzes cache. func Fuzz(data []byte) int { w := Whoami{} - return fuzz.Do(w, data) + return fuzz.Do(w, nil, data) } From 07748d0c34b4c8329dfce8d1fe976ba61152dfec Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Sat, 24 Aug 2019 18:14:25 +0000 Subject: [PATCH 127/324] go report card fixes (#3182) Went over the list at https://goreportcard.com/report/github.com/coredns/coredns and removed/fix some code to make it slightly happier. 
Signed-off-by: Miek Gieben --- plugin/cancel/cancel_test.go | 1 - plugin/done.go | 1 - plugin/etcd/msg/service_test.go | 9 --------- plugin/kubernetes/controller_test.go | 2 +- test/secondary_test.go | 2 +- 5 files changed, 2 insertions(+), 13 deletions(-) diff --git a/plugin/cancel/cancel_test.go b/plugin/cancel/cancel_test.go index ceba9f5d277..f775518097a 100644 --- a/plugin/cancel/cancel_test.go +++ b/plugin/cancel/cancel_test.go @@ -34,7 +34,6 @@ func (s sleepPlugin) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns. return 0, nil } } - return 0, nil } func TestCancel(t *testing.T) { diff --git a/plugin/done.go b/plugin/done.go index 3f53273dade..c6ff8633b39 100644 --- a/plugin/done.go +++ b/plugin/done.go @@ -10,5 +10,4 @@ func Done(ctx context.Context) bool { default: return false } - return false } diff --git a/plugin/etcd/msg/service_test.go b/plugin/etcd/msg/service_test.go index a5038ac2cd5..f334aa5cee1 100644 --- a/plugin/etcd/msg/service_test.go +++ b/plugin/etcd/msg/service_test.go @@ -123,12 +123,3 @@ func TestGroup(t *testing.T) { t.Fatalf("Failure to group seventh set: %v", sx) } } - -func BenchmarkNewSRV(b *testing.B) { - s := &Service{Host: "www,example.org", Port: 8080} - for n := 0; n < b.N; n++ { - srv := s.NewSRV("www.example.org.", 16) - // this assignment makes sure s.NewSRV doesn't get optimized out - srv = srv - } -} diff --git a/plugin/kubernetes/controller_test.go b/plugin/kubernetes/controller_test.go index 37d4705e42c..f945684cb01 100644 --- a/plugin/kubernetes/controller_test.go +++ b/plugin/kubernetes/controller_test.go @@ -78,7 +78,7 @@ func generateEndpoints(cidr string, client kubernetes.Interface) { }, } ep.ObjectMeta.Name = "svc" + strconv.Itoa(count) - _, err = client.CoreV1().Endpoints("testns").Create(ep) + client.CoreV1().Endpoints("testns").Create(ep) count++ } } diff --git a/test/secondary_test.go b/test/secondary_test.go index fd9d4d5621b..1aef05d29d6 100644 --- a/test/secondary_test.go +++ 
b/test/secondary_test.go @@ -73,7 +73,7 @@ func TestSecondaryZoneTransfer(t *testing.T) { var r *dns.Msg // This is now async; we need to wait for it to be transferred. for i := 0; i < 10; i++ { - r, err = dns.Exchange(m, udp) + r, _ = dns.Exchange(m, udp) if len(r.Answer) != 0 { break } From 7219bce2852ab45bf112b910076db4b8cd1baffc Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Sun, 25 Aug 2019 12:59:44 +0000 Subject: [PATCH 128/324] testing: TCPServer was only used in secondary_test.go (#3186) TCPServer was only used in secondary_test.go and even there it wasn't needed. Remove the file. Signed-off-by: Miek Gieben --- plugin/file/secondary_test.go | 28 ++++++------------- plugin/test/server.go | 52 ----------------------------------- 2 files changed, 8 insertions(+), 72 deletions(-) delete mode 100644 plugin/test/server.go diff --git a/plugin/file/secondary_test.go b/plugin/file/secondary_test.go index 5c393e4400f..820c9b9d0e7 100644 --- a/plugin/file/secondary_test.go +++ b/plugin/file/secondary_test.go @@ -4,6 +4,7 @@ import ( "fmt" "testing" + "github.com/coredns/coredns/plugin/pkg/dnstest" "github.com/coredns/coredns/plugin/test" "github.com/coredns/coredns/request" @@ -71,18 +72,12 @@ const testZone = "secondary.miek.nl." 
func TestShouldTransfer(t *testing.T) { soa := soa{250} - dns.HandleFunc(testZone, soa.Handler) - defer dns.HandleRemove(testZone) - - s, addrstr, err := test.TCPServer("127.0.0.1:0") - if err != nil { - t.Fatalf("Unable to run test server: %v", err) - } - defer s.Shutdown() + s := dnstest.NewServer(soa.Handler) + defer s.Close() z := NewZone("testzone", "test") z.origin = testZone - z.TransferFrom = []string{addrstr} + z.TransferFrom = []string{s.Addr} // when we have a nil SOA (initial state) should, err := z.shouldTransfer() @@ -115,21 +110,14 @@ func TestShouldTransfer(t *testing.T) { func TestTransferIn(t *testing.T) { soa := soa{250} - dns.HandleFunc(testZone, soa.Handler) - defer dns.HandleRemove(testZone) - - s, addrstr, err := test.TCPServer("127.0.0.1:0") - if err != nil { - t.Fatalf("Unable to run test server: %v", err) - } - defer s.Shutdown() + s := dnstest.NewServer(soa.Handler) + defer s.Close() z := new(Zone) z.origin = testZone - z.TransferFrom = []string{addrstr} + z.TransferFrom = []string{s.Addr} - err = z.TransferIn() - if err != nil { + if err := z.TransferIn(); err != nil { t.Fatalf("Unable to run TransferIn: %v", err) } if z.Apex.SOA.String() != fmt.Sprintf("%s 3600 IN SOA bla. bla. 250 0 0 0 0", testZone) { diff --git a/plugin/test/server.go b/plugin/test/server.go deleted file mode 100644 index eb39c7a5b9a..00000000000 --- a/plugin/test/server.go +++ /dev/null @@ -1,52 +0,0 @@ -package test - -import ( - "net" - "sync" - "time" - - "github.com/miekg/dns" -) - -// TCPServer starts a DNS server with a TCP listener on laddr. 
-func TCPServer(laddr string) (*dns.Server, string, error) { - l, err := net.Listen("tcp", laddr) - if err != nil { - return nil, "", err - } - - server := &dns.Server{Listener: l, ReadTimeout: time.Hour, WriteTimeout: time.Hour} - - waitLock := sync.Mutex{} - waitLock.Lock() - server.NotifyStartedFunc = func() { waitLock.Unlock() } - - go func() { - server.ActivateAndServe() - l.Close() - }() - - waitLock.Lock() - return server, l.Addr().String(), nil -} - -// UDPServer starts a DNS server with an UDP listener on laddr. -func UDPServer(laddr string) (*dns.Server, string, error) { - pc, err := net.ListenPacket("udp", laddr) - if err != nil { - return nil, "", err - } - server := &dns.Server{PacketConn: pc, ReadTimeout: time.Hour, WriteTimeout: time.Hour} - - waitLock := sync.Mutex{} - waitLock.Lock() - server.NotifyStartedFunc = func() { waitLock.Unlock() } - - go func() { - server.ActivateAndServe() - pc.Close() - }() - - waitLock.Lock() - return server, pc.LocalAddr().String(), nil -} From 01ccbbb12deb35a84c94b646131f43709a51bfef Mon Sep 17 00:00:00 2001 From: Muhammad Falak R Wani Date: Sun, 25 Aug 2019 18:32:44 +0530 Subject: [PATCH 129/324] fuzz: use gofuzz build tag instead of fuzz (#3185) * fuzz: use gofuzz build tag instead of fuzz Since go-fuzz does not support Go modules yet, vendor dependencies. 
Signed-off-by: Muhammad Falak R Wani * fuzz: avoid vendoring code for go-fuzz Signed-off-by: Muhammad Falak R Wani --- Makefile.fuzz | 8 ++++---- plugin/cache/fuzz.go | 2 +- plugin/chaos/fuzz.go | 2 +- plugin/file/fuzz.go | 2 +- plugin/rewrite/fuzz.go | 2 +- plugin/whoami/fuzz.go | 2 +- test/fuzz_corefile.go | 2 +- 7 files changed, 10 insertions(+), 10 deletions(-) diff --git a/Makefile.fuzz b/Makefile.fuzz index 89b3c6f02b1..5f915b71200 100644 --- a/Makefile.fuzz +++ b/Makefile.fuzz @@ -33,20 +33,20 @@ all: $(PLUGINS) corefile .PHONY: $(PLUGINS) $(PLUGINS): echo ifeq ($(LIBFUZZER), YES) - go-fuzz-build -tags fuzz -libfuzzer -o $(@).a ./plugin/$(@) + go-fuzz-build -libfuzzer -o $(@).a ./plugin/$(@) clang -fsanitize=fuzzer $(@).a -o $(@) else - go-fuzz-build -tags fuzz $(REPO)/plugin/$(@) + go-fuzz-build $(REPO)/plugin/$(@) go-fuzz -bin=./$(@)-fuzz.zip -workdir=fuzz/$(@) endif .PHONY: corefile corefile: ifeq ($(LIBFUZZER), YES) - go-fuzz-build -tags fuzz -libfuzzer -o $(@).a ./test + go-fuzz-build -libfuzzer -o $(@).a ./test clang -fsanitize=fuzzer $(@).a -o $(@) else - go-fuzz-build -tags fuzz $(REPO)/test + go-fuzz-build $(REPO)/test go-fuzz -bin=./test-fuzz.zip -workdir=fuzz/$(@) endif diff --git a/plugin/cache/fuzz.go b/plugin/cache/fuzz.go index 401e9b166b0..368953bbf3c 100644 --- a/plugin/cache/fuzz.go +++ b/plugin/cache/fuzz.go @@ -1,4 +1,4 @@ -// +build fuzz +// +build gofuzz package cache diff --git a/plugin/chaos/fuzz.go b/plugin/chaos/fuzz.go index f5a79f775a0..df2e9a0dd92 100644 --- a/plugin/chaos/fuzz.go +++ b/plugin/chaos/fuzz.go @@ -1,4 +1,4 @@ -// +build fuzz +// +build gofuzz package chaos diff --git a/plugin/file/fuzz.go b/plugin/file/fuzz.go index 535db0e93e5..de26ccc8a1b 100644 --- a/plugin/file/fuzz.go +++ b/plugin/file/fuzz.go @@ -1,4 +1,4 @@ -// +build fuzz +// +build gofuzz package file diff --git a/plugin/rewrite/fuzz.go b/plugin/rewrite/fuzz.go index 545ef5b93b4..e096db53512 100644 --- a/plugin/rewrite/fuzz.go +++ b/plugin/rewrite/fuzz.go 
@@ -1,4 +1,4 @@ -// +build fuzz +// +build gofuzz package rewrite diff --git a/plugin/whoami/fuzz.go b/plugin/whoami/fuzz.go index 917a9189f25..3d790ab6369 100644 --- a/plugin/whoami/fuzz.go +++ b/plugin/whoami/fuzz.go @@ -1,4 +1,4 @@ -// +build fuzz +// +build gofuzz package whoami diff --git a/test/fuzz_corefile.go b/test/fuzz_corefile.go index 0abb9d6b3b4..a1a7e6a368d 100644 --- a/test/fuzz_corefile.go +++ b/test/fuzz_corefile.go @@ -1,4 +1,4 @@ -// +build fuzz +// +build gofuzz package test From 793bd324999ed301a7437931213dbbb2d766558f Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Sun, 25 Aug 2019 19:00:59 +0000 Subject: [PATCH 130/324] plugin/forward: add fuzzing (#3188) * plugin/forward: add fuzzing Add fuzz.go for forward Signed-off-by: Miek Gieben * Make it compile Signed-off-by: Miek Gieben --- plugin/forward/fuzz.go | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) create mode 100644 plugin/forward/fuzz.go diff --git a/plugin/forward/fuzz.go b/plugin/forward/fuzz.go new file mode 100644 index 00000000000..eb4f782469a --- /dev/null +++ b/plugin/forward/fuzz.go @@ -0,0 +1,34 @@ +// +build gofuzz + +package forward + +import ( + "github.com/coredns/coredns/plugin/pkg/dnstest" + "github.com/coredns/coredns/plugin/pkg/fuzz" + + "github.com/miekg/dns" +) + +var f *Forward + +// abuse init to setup a environment to test against. This start another server to that will +// reflect responses. +func init() { + f = New() + s := dnstest.NewServer(r{}.reflectHandler) + f.proxies = append(f.proxies, NewProxy(s.Addr, "tcp")) + f.proxies = append(f.proxies, NewProxy(s.Addr, "udp")) +} + +// Fuzz fuzzes forward. 
+func Fuzz(data []byte) int { + return fuzz.Do(f, nil, data) +} + +type r struct{} + +func (r r) reflectHandler(w dns.ResponseWriter, req *dns.Msg) { + m := new(dns.Msg) + m.SetReply(req) + w.WriteMsg(m) +} From e08d3335b0014e5fa9ef6ff239ca3f4d2122f658 Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Sun, 25 Aug 2019 19:01:35 +0000 Subject: [PATCH 131/324] fuzz: revert setup function (#3189) This can't be used in its current form; revert the entire PR. Revert "fuzzing: allow setup function to be called (#3175)" This reverts commit 62451fd3eb68cef5dcdafe6e4228f08cf52d84fc. --- plugin/cache/fuzz.go | 2 +- plugin/chaos/fuzz.go | 2 +- plugin/file/fuzz.go | 2 +- plugin/pkg/fuzz/do.go | 7 +------ plugin/pkg/fuzz/setup.go | 10 ---------- plugin/rewrite/fuzz.go | 2 +- plugin/whoami/fuzz.go | 2 +- 7 files changed, 6 insertions(+), 21 deletions(-) delete mode 100644 plugin/pkg/fuzz/setup.go diff --git a/plugin/cache/fuzz.go b/plugin/cache/fuzz.go index 368953bbf3c..18e98fa9f2d 100644 --- a/plugin/cache/fuzz.go +++ b/plugin/cache/fuzz.go @@ -8,5 +8,5 @@ import ( // Fuzz fuzzes cache. func Fuzz(data []byte) int { - return fuzz.Do(New(), nil, data) + return fuzz.Do(New(), data) } diff --git a/plugin/chaos/fuzz.go b/plugin/chaos/fuzz.go index df2e9a0dd92..53667f2cd84 100644 --- a/plugin/chaos/fuzz.go +++ b/plugin/chaos/fuzz.go @@ -9,5 +9,5 @@ import ( // Fuzz fuzzes cache. 
func Fuzz(data []byte) int { c := Chaos{} - return fuzz.Do(c, nil, data) + return fuzz.Do(c, data) } diff --git a/plugin/file/fuzz.go b/plugin/file/fuzz.go index de26ccc8a1b..e693f58bfba 100644 --- a/plugin/file/fuzz.go +++ b/plugin/file/fuzz.go @@ -15,7 +15,7 @@ func Fuzz(data []byte) int { zone, _ := Parse(strings.NewReader(fuzzMiekNL), name, "stdin", 0) f := File{Next: test.ErrorHandler(), Zones: Zones{Z: map[string]*Zone{name: zone}, Names: []string{name}}} - return fuzz.Do(f, nil, data) + return fuzz.Do(f, data) } const fuzzMiekNL = ` diff --git a/plugin/pkg/fuzz/do.go b/plugin/pkg/fuzz/do.go index 95a5400f1d8..054c4298ab9 100644 --- a/plugin/pkg/fuzz/do.go +++ b/plugin/pkg/fuzz/do.go @@ -11,12 +11,7 @@ import ( ) // Do will fuzz p - used by gofuzz. See Makefile.fuzz for comments and context. -func Do(p plugin.Handler, fn SetupFunc, data []byte) int { - if fn != nil { - if err := fn(); err != nil { - panic("fuzz: " + err.Error()) - } - } +func Do(p plugin.Handler, data []byte) int { ctx := context.TODO() r := new(dns.Msg) if err := r.Unpack(data); err != nil { diff --git a/plugin/pkg/fuzz/setup.go b/plugin/pkg/fuzz/setup.go deleted file mode 100644 index 7c44b0ef38f..00000000000 --- a/plugin/pkg/fuzz/setup.go +++ /dev/null @@ -1,10 +0,0 @@ -package fuzz - -// SetupFunc can be given to Do to perform a one time setup of the fuzzing -// environment. This function is called on every fuzz, it is your -// responsibility to make it idempotent. If SetupFunc returns an error, panic -// is called with that error. -// -// There isn't a ShutdownFunc, because fuzzing is supposed to be run for a long -// time and there isn't any hook to call it from. 
-type SetupFunc func() error diff --git a/plugin/rewrite/fuzz.go b/plugin/rewrite/fuzz.go index e096db53512..757ffab45c6 100644 --- a/plugin/rewrite/fuzz.go +++ b/plugin/rewrite/fuzz.go @@ -17,5 +17,5 @@ func Fuzz(data []byte) int { } r := Rewrite{Rules: rules} - return fuzz.Do(r, nil, data) + return fuzz.Do(r, data) } diff --git a/plugin/whoami/fuzz.go b/plugin/whoami/fuzz.go index 3d790ab6369..70f2bd6021b 100644 --- a/plugin/whoami/fuzz.go +++ b/plugin/whoami/fuzz.go @@ -9,5 +9,5 @@ import ( // Fuzz fuzzes cache. func Fuzz(data []byte) int { w := Whoami{} - return fuzz.Do(w, nil, data) + return fuzz.Do(w, data) } From d65cd709cd1a1dff69c8f83dff8038087191f29c Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Mon, 26 Aug 2019 08:14:43 +0000 Subject: [PATCH 132/324] plugin/file: respond correctly to IXFR message (#3177) * plugin/file: respond correctly to IXFR message Respond with a single SOA record to an IXFR request if the SOA serials match. The added test fails on the current code with: ~~~ === RUN TestIxfrResponse --- FAIL: TestIxfrResponse (0.00s) secondary_test.go:122: Expected answer section with single RR FAIL exit status 1 ~~~ And obviously passes with the new code. This should cut down on the weird number of zone transfers that I was seeing. At some point IXFR support might be cool. 
Fixes: #3176 Signed-off-by: Miek Gieben * reuse code Signed-off-by: Miek Gieben * Slight tweaks Signed-off-by: Miek Gieben --- plugin/file/xfr.go | 41 +++++++++++++++++++++++++++++++++++++ test/secondary_test.go | 46 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 87 insertions(+) diff --git a/plugin/file/xfr.go b/plugin/file/xfr.go index b2dbd145897..f5f803d11f9 100644 --- a/plugin/file/xfr.go +++ b/plugin/file/xfr.go @@ -26,6 +26,15 @@ func (x Xfr) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (in return 0, plugin.Error(x.Name(), fmt.Errorf("xfr called with non transfer type: %d", state.QType())) } + // For IXFR we take the SOA in the IXFR message (if there), compare it to what we have and then decide to do an + // AXFR or just reply with one SOA message back. + if state.QType() == dns.TypeIXFR { + code, _ := x.ServeIxfr(ctx, w, r) + if plugin.ClientWrite(code) { + return code, nil + } + } + records := x.All() if len(records) == 0 { return dns.RcodeServerFailure, nil @@ -63,4 +72,36 @@ func (x Xfr) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (in // Name implements the plugin.Handler interface. func (x Xfr) Name() string { return "xfr" } +// ServeIxfr checks if we need to serve a simpler IXFR for the incoming message. +// See RFC 1995 Section 3: "... and the authority section containing the SOA record of client's version of the zone." +// and Section 2, paragraph 4 where we only need to echo the SOA record back. +// This function must be called when the qtype is IXFR. It returns a plugin.ClientWrite(code) == false, when it didn't +// write anything and we should perform an AXFR. 
+func (x Xfr) ServeIxfr(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (int, error) { + if len(r.Ns) != 1 { + return dns.RcodeServerFailure, nil + } + soa, ok := r.Ns[0].(*dns.SOA) + if !ok { + return dns.RcodeServerFailure, nil + } + + x.RLock() + if x.Apex.SOA == nil { + x.RUnlock() + return dns.RcodeServerFailure, nil + } + serial := x.Apex.SOA.Serial + x.RUnlock() + + if soa.Serial == serial { // Section 2, para 4; echo SOA back. We have the same zone + m := new(dns.Msg) + m.SetReply(r) + m.Answer = []dns.RR{soa} + w.WriteMsg(m) + return 0, nil + } + return dns.RcodeServerFailure, nil +} + const transferLength = 1000 // Start a new envelop after message reaches this size in bytes. Intentionally small to test multi envelope parsing. diff --git a/test/secondary_test.go b/test/secondary_test.go index 1aef05d29d6..1d65dfb2a3d 100644 --- a/test/secondary_test.go +++ b/test/secondary_test.go @@ -83,3 +83,49 @@ func TestSecondaryZoneTransfer(t *testing.T) { t.Fatalf("Expected answer section") } } + +func TestIxfrResponse(t *testing.T) { + // ixfr query with current soa should return single packet with that soa (no transfer needed). + name, rm, err := test.TempFile(".", exampleOrg) + if err != nil { + t.Fatalf("Failed to create zone: %s", err) + } + defer rm() + + corefile := `example.org:0 { + file ` + name + ` { + transfer to * + } +} +` + + i, udp, _, err := CoreDNSServerAndPorts(corefile) + if err != nil { + t.Fatalf("Could not get CoreDNS serving instance: %s", err) + } + defer i.Stop() + + m := new(dns.Msg) + m.SetQuestion("example.org.", dns.TypeIXFR) + m.Ns = []dns.RR{test.SOA("example.org. IN SOA sns.dns.icann.org. noc.dns.icann.org. 2015082541 7200 3600 1209600 3600")} // copied from exampleOrg + + var r *dns.Msg + // This is now async; we need to wait for it to be transferred. 
+ for i := 0; i < 10; i++ { + r, _ = dns.Exchange(m, udp) + if len(r.Answer) != 0 { + break + } + time.Sleep(100 * time.Microsecond) + } + if len(r.Answer) != 1 { + t.Fatalf("Expected answer section with single RR") + } + soa, ok := r.Answer[0].(*dns.SOA) + if !ok { + t.Fatalf("Expected answer section with SOA RR") + } + if soa.Serial != 2015082541 { + t.Fatalf("Serial should be %d, got %d", 2015082541, soa.Serial) + } +} From 8bc4b85b1faf63f25c1a249d1a35b5662f2581fb Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Mon, 26 Aug 2019 08:15:05 +0000 Subject: [PATCH 133/324] fuzz: use new fuzzit (#3193) Upgrade to new version and add if-not-exists to the cmdline Signed-off-by: Miek Gieben --- Makefile | 2 +- Makefile.fuzz | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index 648b8f38bfc..d5deed9040f 100644 --- a/Makefile +++ b/Makefile @@ -55,7 +55,7 @@ ifeq ($(TEST_TYPE),fuzzit) LIBFUZZER=YES $(MAKE) -f Makefile.fuzz all; \ $(MAKE) -sf Makefile.fuzz fuzzit; \ for i in `$(MAKE) -sf Makefile.fuzz echo`; do echo $$i; \ - ./fuzzit create job --type $(FUZZIT_TYPE) coredns/$$i ./$$i; \ + ./fuzzit create job --if-not-exists --type $(FUZZIT_TYPE) coredns/$$i ./$$i; \ done; \ fi; endif diff --git a/Makefile.fuzz b/Makefile.fuzz index 5f915b71200..6aef4685e31 100644 --- a/Makefile.fuzz +++ b/Makefile.fuzz @@ -17,7 +17,7 @@ # GO111MODULE=off go get github.com/dvyukov/go-fuzz/go-fuzz-build REPO:="github.com/coredns/coredns" -FUZZIT:=v2.4.28 +FUZZIT:=v2.4.35 # set LIBFUZZER=YES to build libfuzzer compatible targets FUZZ:=$(dir $(wildcard plugin/*/fuzz.go)) # plugin/cache/ From d4cef05d0f17bfaa97b969ba1bcf63d0d0852fd8 Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 26 Aug 2019 08:19:35 +0000 Subject: [PATCH 134/324] build(deps): bump google.golang.org/api from 0.8.0 to 0.9.0 (#3197) Bumps 
[google.golang.org/api](https://github.com/google/google-api-go-client) from 0.8.0 to 0.9.0. - [Release notes](https://github.com/google/google-api-go-client/releases) - [Changelog](https://github.com/googleapis/google-api-go-client/blob/master/CHANGES.md) - [Commits](https://github.com/google/google-api-go-client/compare/v0.8.0...v0.9.0) Signed-off-by: dependabot-preview[bot] --- go.mod | 2 +- go.sum | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index c25e0327360..ff7c0b80159 100644 --- a/go.mod +++ b/go.mod @@ -60,7 +60,7 @@ require ( golang.org/x/net v0.0.0-20190628185345-da137c7871d7 // indirect golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 // indirect - google.golang.org/api v0.8.0 + google.golang.org/api v0.9.0 google.golang.org/genproto v0.0.0-20190701230453-710ae3a149df // indirect google.golang.org/grpc v1.23.0 gopkg.in/DataDog/dd-trace-go.v1 v1.16.1 diff --git a/go.sum b/go.sum index ab975902d2e..10d129df0d8 100644 --- a/go.sum +++ b/go.sum @@ -460,6 +460,8 @@ google.golang.org/api v0.7.0 h1:9sdfJOzWlkqPltHAuzT2Cp+yrBeY1KRVYgms8soxMwM= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0 h1:VGGbLNyPF7dvYHhcUGYBBGCRDDK0RRJAI6KCvo0CL+E= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0 h1:jbyannxz0XFD3zdjgrSUsaJbgpH4eTrkdhRChkHPfO8= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= From aeb27bdb5bc836e1871f9db526e6ab8d505d4442 Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Mon, 26 Aug 2019 08:25:02 +0000 
Subject: [PATCH 135/324] plugin/file: close reader for reload (#3196) This reloader didn't close the opened file handle. Add a close. Can't use `defer` because this is in an endless loop. Signed-off-by: Miek Gieben --- plugin/file/reload.go | 1 + 1 file changed, 1 insertion(+) diff --git a/plugin/file/reload.go b/plugin/file/reload.go index c56d8f37f05..79db040feeb 100644 --- a/plugin/file/reload.go +++ b/plugin/file/reload.go @@ -25,6 +25,7 @@ func (z *Zone) Reload() error { serial := z.SOASerialIfDefined() zone, err := Parse(reader, z.origin, zFile, serial) + reader.Close() if err != nil { if _, ok := err.(*serialErr); !ok { log.Errorf("Parsing zone %q: %v", z.origin, err) From 934e3721fba3ba3cbf6b0027e5fad30f00d21d82 Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 26 Aug 2019 08:26:12 +0000 Subject: [PATCH 136/324] build(deps): bump github.com/aws/aws-sdk-go from 1.23.3 to 1.23.8 (#3198) Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.23.3 to 1.23.8. 
- [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Changelog](https://github.com/aws/aws-sdk-go/blob/master/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.23.3...v1.23.8) Signed-off-by: dependabot-preview[bot] --- go.mod | 2 +- go.sum | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index ff7c0b80159..8a8cae18992 100644 --- a/go.mod +++ b/go.mod @@ -11,7 +11,7 @@ require ( github.com/Azure/go-autorest/autorest/to v0.2.0 // indirect github.com/Shopify/sarama v1.21.0 // indirect github.com/apache/thrift v0.12.0 // indirect - github.com/aws/aws-sdk-go v1.23.3 + github.com/aws/aws-sdk-go v1.23.8 github.com/caddyserver/caddy v1.0.3 github.com/coredns/federation v0.0.0-20190818181423-e032b096babe github.com/coreos/bbolt v1.3.2 // indirect diff --git a/go.sum b/go.sum index 10d129df0d8..a000c80f45f 100644 --- a/go.sum +++ b/go.sum @@ -80,6 +80,8 @@ github.com/aws/aws-sdk-go v1.22.3 h1:6hh+6uqguPNPLtnb6rYLJnonwixttKMbvZYWVUdms98 github.com/aws/aws-sdk-go v1.22.3/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.23.3 h1:Ty/4P6tOFJkDnKDrFJWnveznvESblf8QOheD1CwQPDU= github.com/aws/aws-sdk-go v1.23.3/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.23.8 h1:G/azJoBN0pnhB3B+0eeC4yyVFYIIad6bbzg6wwtImqk= +github.com/aws/aws-sdk-go v1.23.8/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= From 295be732acec3a8f14c8ea62f053225925fe97a0 Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" 
<27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 26 Aug 2019 08:26:32 +0000 Subject: [PATCH 137/324] build(deps): bump github.com/coreos/etcd (#3199) Bumps [github.com/coreos/etcd](https://github.com/coreos/etcd) from 3.3.13+incompatible to 3.3.15+incompatible. - [Release notes](https://github.com/coreos/etcd/releases) - [Changelog](https://github.com/etcd-io/etcd/blob/master/CHANGELOG-3.4.md) - [Commits](https://github.com/coreos/etcd/compare/v3.3.13...v3.3.15) Signed-off-by: dependabot-preview[bot] --- go.mod | 2 +- go.sum | 22 ++++++++++++++++++++++ 2 files changed, 23 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index 8a8cae18992..e420cd31037 100644 --- a/go.mod +++ b/go.mod @@ -15,7 +15,7 @@ require ( github.com/caddyserver/caddy v1.0.3 github.com/coredns/federation v0.0.0-20190818181423-e032b096babe github.com/coreos/bbolt v1.3.2 // indirect - github.com/coreos/etcd v3.3.13+incompatible + github.com/coreos/etcd v3.3.15+incompatible github.com/coreos/go-semver v0.2.0 // indirect github.com/coreos/go-systemd v0.0.0-20190212144455-93d5ec2c7f76 // indirect github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f // indirect diff --git a/go.sum b/go.sum index a000c80f45f..082d306c1bf 100644 --- a/go.sum +++ b/go.sum @@ -64,6 +64,7 @@ github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuy github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/apache/thrift v0.12.0 h1:pODnxUFNcjP9UTLZGTdeh+j16A8lJbRvD3rOtrk/7bs= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/aws/aws-sdk-go v1.20.15 
h1:y9ts8MJhB7ReUidS6Rq+0KxdFeL01J+pmOlGq6YqpiQ= github.com/aws/aws-sdk-go v1.20.15/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.20.17 h1:ZQ0cM9FpuufVDHlNViD8vD68IkEQL/VUOMwJPBqkaaM= @@ -103,14 +104,19 @@ github.com/coredns/federation v0.0.0-20190818181423-e032b096babe h1:ND08lR/TclI9 github.com/coredns/federation v0.0.0-20190818181423-e032b096babe/go.mod h1:MoqTEFX8GlnKkyq8eBCF94VzkNAOgjdlCJ+Pz/oCLPk= github.com/coreos/bbolt v1.3.2 h1:wZwiHHUieZCquLkDL0B8UhzreNWsPHooDAG3q34zk0s= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/etcd v3.3.13+incompatible h1:8F3hqu9fGYLBifCmRCJsicFqDx/D68Rt3q1JMazcgBQ= github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/etcd v3.3.15+incompatible h1:+9RjdC18gMxNQVvSiXvObLu29mOFmkgdsB4cRTlV+EE= +github.com/coreos/etcd v3.3.15+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-semver v0.2.0 h1:3Jm3tLmsgAYcjC+4Up7hJrFBPr+n7rAqYeSw/SZazuY= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20190212144455-93d5ec2c7f76 h1:FE783w8WFh+Rvg+7bZ5g8p7gP4SeVS4AoNwkvazlsBg= github.com/coreos/go-systemd v0.0.0-20190212144455-93d5ec2c7f76/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man 
v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -209,10 +215,12 @@ github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47/go.mod h1:/m3 github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/imdario/mergo v0.3.7 h1:Y+UAYTZ7gDEuOfhxKWy+dvb5dRQ6rJjFSdX2HZY1/gI= github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/jimstudt/http-authentication v0.0.0-20140401203705-3eca13d6893a/go.mod h1:wK6yTYYcgjHE1Z1QtXACPDjcFJyBskHEdagmnq3vsP8= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= @@ -243,6 +251,7 @@ github.com/lucas-clemente/aes12 v0.0.0-20171027163421-cd47fb39b79f/go.mod h1:JpH github.com/lucas-clemente/quic-clients v0.1.0/go.mod h1:y5xVIEoObKqULIKivu+gD/LU90pL73bTdtQjPBvtCBk= github.com/lucas-clemente/quic-go v0.10.2/go.mod 
h1:hvaRS9IHjFLMq76puFJeWNfmn+H70QZ/CXoxqw9bzao= github.com/lucas-clemente/quic-go-certificates v0.0.0-20160823095156-d2f86524cced/go.mod h1:NCcRLrOTZbzhZvixZLlERbJtDtYsmMw8Jc4vS8Z0g58= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/marten-seemann/qtls v0.2.3/go.mod h1:xzjG7avBwGGbdZ8dTGxlBnLArsVKLvwmjgmPuiQEcYk= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= @@ -254,6 +263,7 @@ github.com/miekg/dns v1.1.16 h1:iMEQ/IVHxPTtx2Q07JP/k4CKRvSjiAZjZ0hnhgYEDmE= github.com/miekg/dns v1.1.16/go.mod h1:YNV562EiewvSmpCB6/W4c6yqjK7Z+M/aIS1JHsIVeg8= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -281,6 +291,7 @@ github.com/openzipkin/zipkin-go v0.1.3/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTm github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= github.com/openzipkin/zipkin-go-opentracing v0.3.5 h1:nZPvd2EmRKP+NzFdSuxZF/FG4Y4W2gn6ugXliTAu9o0= github.com/openzipkin/zipkin-go-opentracing v0.3.5/go.mod h1:js2AbwmHW0YD9DwIw2JhQWmbfFi/UnWyYwdVhqbCDOE= +github.com/pelletier/go-toml v1.2.0/go.mod 
h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/philhofer/fwd v1.0.0 h1:UbZqGr5Y38ApvM/V/jEljVxwocdweyH+vmYvRPBnbqQ= github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= github.com/pierrec/lz4 v2.0.5+incompatible h1:2xWsjqPFWcplujydGg4WmhC/6fZqK42wMM8aXeqhl0I= @@ -318,14 +329,21 @@ github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhD github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/russross/blackfriday v0.0.0-20170610170232-067529f716f4/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/pflag 
v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= @@ -335,8 +353,10 @@ github.com/tinylib/msgp v1.1.0 h1:9fQd+ICuRIu/ue4vxJZu6/LzxN0HwMds2nq/0cFvxHU= github.com/tinylib/msgp v1.1.0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 h1:LnC5Kc/wtumK+WB441p7ynQJzVuNRJiqddSIE3IlSEQ= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= go.etcd.io/bbolt v1.3.2 h1:Z/90sZLPOeCy2PwprqkFa25PdkusRzaj9P8zm/KNyvk= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.opencensus.io v0.18.1-0.20181204023538-aab39bd6a98b/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= @@ -355,6 +375,7 @@ go.uber.org/zap v1.9.1 h1:XCJQEf3W6eZaVwhRBof6ImoYGJSITeKWsyeh3HFu/5o= go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod 
h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181001203147-e3636079e1a4/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190123085648-057139ce5d2b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190228161510-8dd112bcdc25/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -413,6 +434,7 @@ golang.org/x/sys v0.0.0-20180928133829-e4b3c5e90611/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181218192612-074acd46bca6/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190124100055-b90733256f2e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= From 2332467b87b02f168034b30abac41f60a01548b5 Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Mon, 26 Aug 2019 09:33:20 +0000 Subject: [PATCH 138/324] Fuzz: fix forward build (#3200) * Fuzz: fix forward build Signed-off-by: Miek Gieben * Flag not released yet, wget quiet Signed-off-by: Miek Gieben --- Makefile | 2 +- Makefile.fuzz | 2 +- plugin/forward/fuzz.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Makefile b/Makefile index d5deed9040f..648b8f38bfc 100644 --- a/Makefile +++ b/Makefile @@ -55,7 +55,7 @@ ifeq 
($(TEST_TYPE),fuzzit) LIBFUZZER=YES $(MAKE) -f Makefile.fuzz all; \ $(MAKE) -sf Makefile.fuzz fuzzit; \ for i in `$(MAKE) -sf Makefile.fuzz echo`; do echo $$i; \ - ./fuzzit create job --if-not-exists --type $(FUZZIT_TYPE) coredns/$$i ./$$i; \ + ./fuzzit create job --type $(FUZZIT_TYPE) coredns/$$i ./$$i; \ done; \ fi; endif diff --git a/Makefile.fuzz b/Makefile.fuzz index 6aef4685e31..5f4c1be2cde 100644 --- a/Makefile.fuzz +++ b/Makefile.fuzz @@ -51,7 +51,7 @@ else endif fuzzit: - wget -O fuzzit https://github.com/fuzzitdev/fuzzit/releases/download/$(FUZZIT)/fuzzit_Linux_x86_64 && chmod +x fuzzit + wget --quiet -O fuzzit https://github.com/fuzzitdev/fuzzit/releases/download/$(FUZZIT)/fuzzit_Linux_x86_64 && chmod +x fuzzit .PHONY: clean clean: diff --git a/plugin/forward/fuzz.go b/plugin/forward/fuzz.go index eb4f782469a..cbb4edf01d4 100644 --- a/plugin/forward/fuzz.go +++ b/plugin/forward/fuzz.go @@ -22,7 +22,7 @@ func init() { // Fuzz fuzzes forward. func Fuzz(data []byte) int { - return fuzz.Do(f, nil, data) + return fuzz.Do(f, data) } type r struct{} From dd8238ba9bbdba5accd7237e09b60e5438065f56 Mon Sep 17 00:00:00 2001 From: li mengyang Date: Mon, 26 Aug 2019 17:33:57 +0800 Subject: [PATCH 139/324] fix spelling mistakes (#3202) Signed-off-by: hwdef --- notes/coredns-004.md | 2 +- notes/coredns-1.0.2.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/notes/coredns-004.md b/notes/coredns-004.md index b19b8b14258..5360d13597f 100644 --- a/notes/coredns-004.md +++ b/notes/coredns-004.md @@ -24,7 +24,7 @@ We are now also releasing an ARM build that can run on Raspberry Pi. * *file|auto*: include zone's NSset in positive responses. * *auto*: close files and don't leak file descriptors. * *httpproxy*: new plugin that proxies to and resolves your requests over an encrypted connection. This plugin will probably be morphed into proxy at some point in the feature. Consider it experimental for the time being. 
-* *metrics*: `reponse_size_bytes` and `request_size_bytes` export the actual length of the packet, not the advertised bufsize. +* *metrics*: `response_size_bytes` and `request_size_bytes` export the actual length of the packet, not the advertised bufsize. * *log*: `{size}` is now the length in bytes of the request, `{rsize}` for the reply. Default logging is changed to show both. # Contributors diff --git a/notes/coredns-1.0.2.md b/notes/coredns-1.0.2.md index 4dbd3c1bf6c..413a660809c 100644 --- a/notes/coredns-1.0.2.md +++ b/notes/coredns-1.0.2.md @@ -9,7 +9,7 @@ author = "coredns" +++ We are pleased to announce the [release](https://github.com/coredns/coredns/releases/tag/v1.0.2) of CoreDNS-1.0.2! -This release can be summerized as "help external plugin developers" as most changes are geared +This release can be summarized as "help external plugin developers" as most changes are geared towards exposing CoreDNS functionality to make this as easy as possible. Is also a fairly small release. From 9fe7fb95c646dfa95b14a5aef44ca4588c9ea8c0 Mon Sep 17 00:00:00 2001 From: xieyanker Date: Mon, 26 Aug 2019 18:31:24 +0800 Subject: [PATCH 140/324] return standardized text for ready and health endpoint (#3195) --- plugin/health/health.go | 2 +- plugin/health/health_test.go | 2 +- plugin/ready/ready.go | 2 +- test/reload_test.go | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/plugin/health/health.go b/plugin/health/health.go index 55ff68407e5..2ac75f08d96 100644 --- a/plugin/health/health.go +++ b/plugin/health/health.go @@ -42,7 +42,7 @@ func (h *health) OnStartup() error { h.mux.HandleFunc("/health", func(w http.ResponseWriter, r *http.Request) { // We're always healthy. 
w.WriteHeader(http.StatusOK) - io.WriteString(w, "OK") + io.WriteString(w, http.StatusText(http.StatusOK)) return }) diff --git a/plugin/health/health_test.go b/plugin/health/health_test.go index 7c7ad43a13b..35ab285feef 100644 --- a/plugin/health/health_test.go +++ b/plugin/health/health_test.go @@ -31,7 +31,7 @@ func TestHealth(t *testing.T) { } response.Body.Close() - if string(content) != "OK" { + if string(content) != http.StatusText(http.StatusOK) { t.Errorf("Invalid response body: expecting 'OK', got '%s'", string(content)) } } diff --git a/plugin/ready/ready.go b/plugin/ready/ready.go index ff19b59f877..0b08c22f4ac 100644 --- a/plugin/ready/ready.go +++ b/plugin/ready/ready.go @@ -45,7 +45,7 @@ func (rd *ready) onStartup() error { ok, todo := plugins.Ready() if ok { w.WriteHeader(http.StatusOK) - io.WriteString(w, "OK") + io.WriteString(w, http.StatusText(http.StatusOK)) return } log.Infof("Still waiting on: %q", todo) diff --git a/test/reload_test.go b/test/reload_test.go index e026a97cead..f17a85b050c 100644 --- a/test/reload_test.go +++ b/test/reload_test.go @@ -110,7 +110,7 @@ func TestReloadMetricsHealth(t *testing.T) { } ok, _ := ioutil.ReadAll(resp.Body) resp.Body.Close() - if string(ok) != "OK" { + if string(ok) != http.StatusText(http.StatusOK) { t.Errorf("Failed to receive OK, got %s", ok) } From 73391f639d2342ddc2027855121314e3bec6dabf Mon Sep 17 00:00:00 2001 From: Chris Aniszczyk Date: Mon, 26 Aug 2019 11:13:07 -0500 Subject: [PATCH 141/324] SECURITY.md to take advantage of GitHub features (#3206) https://help.github.com/en/articles/adding-a-security-policy-to-your-repository https://help.github.com/en/articles/about-maintainer-security-advisories Signed-off-by: Chris Aniszczyk --- SECURITY-RELEASE-PROCESS.md => SECURITY.md | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename SECURITY-RELEASE-PROCESS.md => SECURITY.md (100%) diff --git a/SECURITY-RELEASE-PROCESS.md b/SECURITY.md similarity index 100% rename from 
SECURITY-RELEASE-PROCESS.md rename to SECURITY.md From f189dba69d584875686f406784410c6bb4e38dea Mon Sep 17 00:00:00 2001 From: Chris O'Haver Date: Mon, 26 Aug 2019 12:27:18 -0400 Subject: [PATCH 142/324] Update README.md (#3207) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 1a38c402c77..11057354d4c 100644 --- a/README.md +++ b/README.md @@ -233,4 +233,4 @@ issue, instead send your report privately to `security@coredns.io`. Security rep appreciated and we will publicly thank you for it. Please consult [security vulnerability disclosures and security fix and release process -document](https://github.com/coredns/coredns/blob/master/SECURITY-RELEASE-PROCESS.md) +document](https://github.com/coredns/coredns/blob/master/SECURITY.md) From f491880d6853f2a327d6b588d4eaa381aaa5f6a0 Mon Sep 17 00:00:00 2001 From: Chris O'Haver Date: Tue, 27 Aug 2019 02:19:48 -0400 Subject: [PATCH 143/324] plugin/kubernetes: Restore k8s.io/client-go to v12.0.0 (#3209) * bump k8s.io/client-go and azure/go-autorest * bump azure * bump azure 2 --- go.mod | 13 ++--- go.sum | 177 ++++++++++++--------------------------------------------- 2 files changed, 42 insertions(+), 148 deletions(-) diff --git a/go.mod b/go.mod index e420cd31037..40be2ac30ee 100644 --- a/go.mod +++ b/go.mod @@ -4,8 +4,8 @@ go 1.12 require ( cloud.google.com/go v0.41.0 // indirect - contrib.go.opencensus.io/exporter/ocagent v0.4.12 // indirect github.com/Azure/azure-sdk-for-go v31.1.0+incompatible + github.com/Azure/go-autorest v13.0.0+incompatible // indirect github.com/Azure/go-autorest/autorest v0.9.0 github.com/Azure/go-autorest/autorest/azure/auth v0.3.0 github.com/Azure/go-autorest/autorest/to v0.2.0 // indirect @@ -29,18 +29,16 @@ require ( github.com/gophercloud/gophercloud v0.0.0-20190307220656-fe1ba5ce12dd // indirect github.com/grpc-ecosystem/go-grpc-middleware v1.0.0 // indirect 
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect + github.com/grpc-ecosystem/grpc-gateway v1.8.5 // indirect github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 github.com/imdario/mergo v0.3.7 // indirect github.com/jonboulle/clockwork v0.1.0 // indirect github.com/konsorten/go-windows-terminal-sequences v1.0.2 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.1 github.com/miekg/dns v1.1.16 - github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect - github.com/modern-go/reflect2 v1.0.1 // indirect github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492 // indirect github.com/opentracing/opentracing-go v1.1.0 github.com/openzipkin-contrib/zipkin-go-opentracing v0.3.5 - github.com/openzipkin/zipkin-go-opentracing v0.3.5 // indirect github.com/philhofer/fwd v1.0.0 // indirect github.com/pkg/errors v0.8.1 // indirect github.com/prometheus/client_golang v1.1.0 @@ -65,13 +63,12 @@ require ( google.golang.org/grpc v1.23.0 gopkg.in/DataDog/dd-trace-go.v1 v1.16.1 gopkg.in/inf.v0 v0.9.1 // indirect - k8s.io/api v0.0.0-20190313235455-40a48860b5ab - k8s.io/apimachinery v0.0.0-20190313205120-d7deff9243b1 - k8s.io/client-go v11.0.0+incompatible + k8s.io/api v0.0.0-20190620084959-7cf5895f2711 + k8s.io/apimachinery v0.0.0-20190612205821-1799e75a0719 + k8s.io/client-go v0.0.0-20190620085101-78d2af792bab k8s.io/klog v0.4.0 k8s.io/kube-openapi v0.0.0-20190306001800-15615b16d372 // indirect k8s.io/utils v0.0.0-20190529001817-6999998975a7 // indirect - sigs.k8s.io/yaml v1.1.0 // indirect ) replace github.com/miekg/dns v1.1.3 => github.com/miekg/dns v1.1.16 diff --git a/go.sum b/go.sum index 082d306c1bf..fb393ae1e28 100644 --- a/go.sum +++ b/go.sum @@ -3,59 +3,38 @@ cloud.google.com/go v0.34.0/go.mod 
h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.41.0 h1:NFvqUTDnSNYPX5oReekmB+D+90jrJIcVImxQ3qrBVgM= cloud.google.com/go v0.41.0/go.mod h1:OauMR7DV8fzvZIl2qg6rkaIhD/vmgk4iwEw/h6ercmg= -contrib.go.opencensus.io/exporter/ocagent v0.4.6/go.mod h1:YuG83h+XWwqWjvCqn7vK4KSyLKhThY3+gNGQ37iS2V0= -contrib.go.opencensus.io/exporter/ocagent v0.4.12 h1:jGFvw3l57ViIVEPKKEUXPcLYIXJmQxLUh6ey1eJhwyc= -contrib.go.opencensus.io/exporter/ocagent v0.4.12/go.mod h1:450APlNTSR6FrvC3CTRqYosuDstRB9un7SOx2k/9ckA= -git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= -git.apache.org/thrift.git v0.12.0/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= github.com/Azure/azure-sdk-for-go v31.1.0+incompatible h1:5SzgnfAvUBdBwNTN23WLfZoCt/rGhLvd7QdCAaFXgY4= github.com/Azure/azure-sdk-for-go v31.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/go-autorest/autorest v0.1.0/go.mod h1:AKyIcETwSUFxIcs/Wnq/C+kwCtlEYGUVd7FPNb2slmg= -github.com/Azure/go-autorest/autorest v0.5.0 h1:Mlm9qy2fpQ9MvfyI41G2Zf5B4CsgjjNbLOWszfK6KrY= -github.com/Azure/go-autorest/autorest v0.5.0/go.mod h1:9HLKlQjVBH6U3oDfsXOeVc56THsLPw1L03yban4xThw= -github.com/Azure/go-autorest/autorest v0.8.0 h1:hdhcXqJ/T98aigNKEs1LjXlGbxWGSIP0rA/GMc15UPA= -github.com/Azure/go-autorest/autorest v0.8.0/go.mod h1:io2I5Mipuszn4buXaxt/RzC43Yf6aBtkgyUFF2HBdLY= +github.com/Azure/go-autorest v11.1.2+incompatible h1:viZ3tV5l4gE2Sw0xrasFHytCGtzYCrT+um/rrSQ1BfA= +github.com/Azure/go-autorest v11.1.2+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest v13.0.0+incompatible h1:56c11ykhsFSPNNQuS73Ri8h/ezqVhr2h6t9LJIEKVO0= +github.com/Azure/go-autorest v13.0.0+incompatible/go.mod 
h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest/autorest v0.9.0 h1:MRvx8gncNaXJqOoLmhNjUAKh33JJF8LyxPhomEtOsjs= github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= -github.com/Azure/go-autorest/autorest/adal v0.1.0/go.mod h1:MeS4XhScH55IST095THyTxElntu7WqB7pNbZo8Q5G3E= -github.com/Azure/go-autorest/autorest/adal v0.2.0 h1:7IBDu1jgh+ADHXnEYExkV9RE/ztOOlxdACkkPRthGKw= -github.com/Azure/go-autorest/autorest/adal v0.2.0/go.mod h1:MeS4XhScH55IST095THyTxElntu7WqB7pNbZo8Q5G3E= -github.com/Azure/go-autorest/autorest/adal v0.4.0 h1:e4yveyEm5WvJDtmxJCY8fW1d3MY7vlQw/dVgL7kEmAg= -github.com/Azure/go-autorest/autorest/adal v0.4.0/go.mod h1:n1iM1u+ZMYni6IIY+FB82IT/DbSdC3T3gO1qFQ/3jEw= -github.com/Azure/go-autorest/autorest/adal v0.5.0 h1:q2gDruN08/guU9vAjuPWff0+QIrpH6ediguzdAzXAUU= github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= github.com/Azure/go-autorest/autorest/adal v0.6.0 h1:UCTq22yE3RPgbU/8u4scfnnzuCW6pwQ9n+uBtV78ouo= github.com/Azure/go-autorest/autorest/adal v0.6.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= -github.com/Azure/go-autorest/autorest/azure/auth v0.1.0 h1:YgO/vSnJEc76NLw2ecIXvXa8bDWiqf1pOJzARAoZsYU= -github.com/Azure/go-autorest/autorest/azure/auth v0.1.0/go.mod h1:Gf7/i2FUpyb/sGBLIFxTBzrNzBo7aPXXE3ZVeDRwdpM= github.com/Azure/go-autorest/autorest/azure/auth v0.3.0 h1:JwftqZDtWkr3qt1kcEgPd7H57uCHsXKXf66agWUQcGw= github.com/Azure/go-autorest/autorest/azure/auth v0.3.0/go.mod h1:CI4BQYBct8NS7BXNBBX+RchsFsUu5+oz+OSyR/ZIi7U= -github.com/Azure/go-autorest/autorest/azure/cli v0.1.0 h1:YTtBrcb6mhA+PoSW8WxFDoIIyjp13XqJeX80ssQtri4= -github.com/Azure/go-autorest/autorest/azure/cli v0.1.0/go.mod h1:Dk8CUAt/b/PzkfeRsWzVG9Yj3ps8mS8ECztu43rdU8U= 
github.com/Azure/go-autorest/autorest/azure/cli v0.3.0 h1:5PAqnv+CSTwW9mlZWZAizmzrazFWEgZykEZXpr2hDtY= github.com/Azure/go-autorest/autorest/azure/cli v0.3.0/go.mod h1:rNYMNAefZMRowqCV0cVhr/YDW5dD7afFq9nXAXL4ykE= -github.com/Azure/go-autorest/autorest/date v0.1.0 h1:YGrhWfrgtFs84+h0o46rJrlmsZtyZRg470CqAXTZaGM= github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= github.com/Azure/go-autorest/autorest/date v0.2.0 h1:yW+Zlqf26583pE43KhfnhFcdmSWlm5Ew6bxipnr/tbM= github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.3.0 h1:qJumjCaCudz+OcqE9/XtEPfvtOjOmKaui4EOpFI6zZc= github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= github.com/Azure/go-autorest/autorest/to v0.2.0 h1:nQOZzFCudTh+TvquAtCRjM01VEYx85e9qbwt5ncW4L8= github.com/Azure/go-autorest/autorest/to v0.2.0/go.mod h1:GunWKJp1AEqgMaGLV+iocmRAJWqST1wQYhyyjXJ3SJc= github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY= github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= -github.com/Azure/go-autorest/tracing v0.1.0 h1:TRBxC5Pj/fIuh4Qob0ZpkggbfT8RC0SubHbpV3p4/Vc= -github.com/Azure/go-autorest/tracing v0.1.0/go.mod h1:ROEEAFwXycQw7Sn3DXNtEedEvdeRAgDr0izn4z5Ij88= -github.com/Azure/go-autorest/tracing v0.4.0 h1:r5D+n0u8XGFQIpIYk0xlKLDgiArnnIsfUjWANW5Wto0= -github.com/Azure/go-autorest/tracing v0.4.0/go.mod h1:sVZ/n8H0f4naUjHNvSe2qjNiC3oV6+8CCqU9mhEvav8= 
github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k= github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DataDog/zstd v1.3.5 h1:DtpNbljikUepEPD16hD4LvIcmhnhdLTiW/5pHgbmp14= github.com/DataDog/zstd v1.3.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= -github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= github.com/Shopify/sarama v1.21.0 h1:0GKs+e8mn1RRUzfg9oUXv3v7ZieQLmOZF/bfnmmGhM8= github.com/Shopify/sarama v1.21.0/go.mod h1:yuqtN/pe8cXRWG5zPaO7hCfNJp5MwmkoJEoLjkm5tCQ= github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc= @@ -64,23 +43,6 @@ github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuy github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/apache/thrift v0.12.0 h1:pODnxUFNcjP9UTLZGTdeh+j16A8lJbRvD3rOtrk/7bs= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= -github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/aws/aws-sdk-go v1.20.15 h1:y9ts8MJhB7ReUidS6Rq+0KxdFeL01J+pmOlGq6YqpiQ= -github.com/aws/aws-sdk-go v1.20.15/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.20.17 h1:ZQ0cM9FpuufVDHlNViD8vD68IkEQL/VUOMwJPBqkaaM= -github.com/aws/aws-sdk-go v1.20.17/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.20.20 
h1:OAR/GtjMOhenkp1NNKr1N1FgIP3mQXHeGbRhvVIAQp0= -github.com/aws/aws-sdk-go v1.20.20/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.21.0 h1:dRGzi4XZe5GFSJssHkRNUf/hRD/HL8bdqTYa9hpAO8c= -github.com/aws/aws-sdk-go v1.21.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.21.1 h1:IOFDnCEDybcw4V8nbKqyyjBu+vpu7hFYSfZqNuogi7I= -github.com/aws/aws-sdk-go v1.21.1/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.21.2 h1:CqbWrQzi7s8J2F0TRRdLvTr0+bt5Zxo2IDoFNGsAiUg= -github.com/aws/aws-sdk-go v1.21.2/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.22.3 h1:6hh+6uqguPNPLtnb6rYLJnonwixttKMbvZYWVUdms98= -github.com/aws/aws-sdk-go v1.22.3/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.23.3 h1:Ty/4P6tOFJkDnKDrFJWnveznvESblf8QOheD1CwQPDU= -github.com/aws/aws-sdk-go v1.23.3/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.23.8 h1:G/azJoBN0pnhB3B+0eeC4yyVFYIIad6bbzg6wwtImqk= github.com/aws/aws-sdk-go v1.23.8/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= @@ -89,43 +51,35 @@ github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+Ce github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bifurcation/mint v0.0.0-20180715133206-93c51c6ce115/go.mod h1:zVt7zX3K/aDCk9Tj+VM7YymsX66ERvzCJzw8rFCX2JU= -github.com/caddyserver/caddy v1.0.1 h1:oor6ep+8NoJOabpFXhvjqjfeldtw1XSzfISVrbfqTKo= -github.com/caddyserver/caddy v1.0.1/go.mod 
h1:G+ouvOY32gENkJC+jhgl62TyhvqEsFaDiZ4uw0RzP1E= github.com/caddyserver/caddy v1.0.3 h1:i9gRhBgvc5ifchwWtSe7pDpsdS9+Q0Rw9oYQmYUTw1w= github.com/caddyserver/caddy v1.0.3/go.mod h1:G+ouvOY32gENkJC+jhgl62TyhvqEsFaDiZ4uw0RzP1E= github.com/cenkalti/backoff v2.1.1+incompatible h1:tKJnvO2kl0zmb/jA5UKAt4VoEVw1qxKWjE/Bpp46npY= github.com/cenkalti/backoff v2.1.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= -github.com/census-instrumentation/opencensus-proto v0.1.0-0.20181214143942-ba49f56771b8/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/census-instrumentation/opencensus-proto v0.2.0 h1:LzQXZOgg4CQfE6bFvXGM30YZL1WW/M337pXml+GrcZ4= -github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cheekybits/genny v0.0.0-20170328200008-9127e812e1e9/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/coredns/federation v0.0.0-20190818181423-e032b096babe h1:ND08lR/TclI9W4dScCwdRESOacCCdF3FkuB5pBIOv1U= github.com/coredns/federation v0.0.0-20190818181423-e032b096babe/go.mod h1:MoqTEFX8GlnKkyq8eBCF94VzkNAOgjdlCJ+Pz/oCLPk= github.com/coreos/bbolt v1.3.2 h1:wZwiHHUieZCquLkDL0B8UhzreNWsPHooDAG3q34zk0s= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= -github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/etcd v3.3.13+incompatible h1:8F3hqu9fGYLBifCmRCJsicFqDx/D68Rt3q1JMazcgBQ= -github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/etcd v3.3.15+incompatible h1:+9RjdC18gMxNQVvSiXvObLu29mOFmkgdsB4cRTlV+EE= github.com/coreos/etcd v3.3.15+incompatible/go.mod 
h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-semver v0.2.0 h1:3Jm3tLmsgAYcjC+4Up7hJrFBPr+n7rAqYeSw/SZazuY= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20190212144455-93d5ec2c7f76 h1:FE783w8WFh+Rvg+7bZ5g8p7gP4SeVS4AoNwkvazlsBg= github.com/coreos/go-systemd v0.0.0-20190212144455-93d5ec2c7f76/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgrijalva/jwt-go v0.0.0-20160705203006-01aeca54ebda/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dimchansky/utfbom v1.1.0 h1:FcM3g+nofKgUteL8dm/UpdRXNC9KmADgTpLKsu0TRo4= github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= github.com/dnstap/golang-dnstap v0.0.0-20170829151710-2cf77a2b5e11 h1:m8nX8hsUghn853BJ5qB0lX+VvS6LTJPksWyILFZRYN4= github.com/dnstap/golang-dnstap v0.0.0-20170829151710-2cf77a2b5e11/go.mod h1:s1PfVYYVmTMgCSPtho4LKBDecEHJWtiVDPNv78Z985U= 
+github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/eapache/go-resiliency v1.1.0 h1:1NtRmCAqadE2FN4ZcN6g90TP3uk8cg9rn9eNK2197aU= github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= @@ -133,6 +87,8 @@ github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8 github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/evanphx/json-patch v0.0.0-20190203023257-5858425f7550/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.1.0+incompatible h1:K1MDoo4AZ4wU0GIU/fPmtZg7VpzLjCxu+UwBD1FvwOc= github.com/evanphx/json-patch v4.1.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/farsightsec/golang-framestream v0.0.0-20181102145529-8a0cb8ba8710 h1:QdyRyGZWLEvJG5Kw3VcVJvhXJ5tZ1MkRgqpJOEZSySM= @@ -151,15 +107,15 @@ github.com/go-logfmt/logfmt v0.4.0 h1:MP4Eh7ZCb31lleYCFuwm0oe4/YGak+5l1vA2NOE80n github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gogo/protobuf v0.0.0-20171007142547-342cbe0a0415/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= 
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef h1:veQD95Isof8w9/WXiA+pa3tz3fJXkt5B7QaRBrM62gk= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= @@ -170,6 +126,7 @@ github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db h1:woRePGFeVFfLKN/pOkfl+p/TAqKOfFu+7KPlMVpok/w= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20160524151835-7d79101e329e/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree 
v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= @@ -183,31 +140,27 @@ github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/ github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.2.0 h1:l6N3VoaVzTncYYW+9yOz2LJJammFZGBO13sqgEhpy9g= github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/gophercloud/gophercloud v0.0.0-20190126172459-c818fa66e4c8/go.mod h1:3WdhXV3rUYy9p6AUW8d94kr+HS62Y4VL9mBnFxsD8q4= github.com/gophercloud/gophercloud v0.0.0-20190307220656-fe1ba5ce12dd h1:tkA3C/XTk8iACLOlTez37pL+0iGSYkkRGKdXgJ6ZylM= github.com/gophercloud/gophercloud v0.0.0-20190307220656-fe1ba5ce12dd/go.mod 
h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= -github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= -github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gregjones/httpcache v0.0.0-20170728041850-787624de3eb7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0 h1:Iju5GlWwrvL6UBg4zJJt3btmonfrMlCDdsejg4CZE7c= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= -github.com/grpc-ecosystem/grpc-gateway v1.6.2/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= -github.com/grpc-ecosystem/grpc-gateway v1.8.3 h1:wZ6biDjnBgIOf5t+r8dYsoGWH1Zl2Ps32OxD80Odbk8= -github.com/grpc-ecosystem/grpc-gateway v1.8.3/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.8.5 h1:2+KSC78XiO6Qy0hIjfc1OD9H+hsaJdJlb8Kqsd41CTE= github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.9.5 h1:UImYN5qQ8tuGpGE16ZmjvcTtTw24zw1QAp/SlnNrZhI= -github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 h1:MJG/KsmcqMwFAkh8mTnAwhyKoB+sTAnY4CACC110tbU= 
github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= @@ -215,17 +168,17 @@ github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47/go.mod h1:/m3 github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.7 h1:Y+UAYTZ7gDEuOfhxKWy+dvb5dRQ6rJjFSdX2HZY1/gI= github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/jimstudt/http-authentication v0.0.0-20140401203705-3eca13d6893a/go.mod h1:wK6yTYYcgjHE1Z1QtXACPDjcFJyBskHEdagmnq3vsP8= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/json-iterator/go v0.0.0-20180701071628-ab8a2e0c74be/go.mod 
h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7 h1:KfgG9LzI+pYjr4xvmz/5H4FXjokeP+rlHLhv3iH62Fo= @@ -251,19 +204,15 @@ github.com/lucas-clemente/aes12 v0.0.0-20171027163421-cd47fb39b79f/go.mod h1:JpH github.com/lucas-clemente/quic-clients v0.1.0/go.mod h1:y5xVIEoObKqULIKivu+gD/LU90pL73bTdtQjPBvtCBk= github.com/lucas-clemente/quic-go v0.10.2/go.mod h1:hvaRS9IHjFLMq76puFJeWNfmn+H70QZ/CXoxqw9bzao= github.com/lucas-clemente/quic-go-certificates v0.0.0-20160823095156-d2f86524cced/go.mod h1:NCcRLrOTZbzhZvixZLlERbJtDtYsmMw8Jc4vS8Z0g58= -github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/marten-seemann/qtls v0.2.3/go.mod h1:xzjG7avBwGGbdZ8dTGxlBnLArsVKLvwmjgmPuiQEcYk= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mholt/certmagic v0.6.2-0.20190624175158-6a42ef9fe8c2 h1:xKE9kZ5C8gelJC3+BNM6LJs1x21rivK7yxfTZMAuY2s= github.com/mholt/certmagic v0.6.2-0.20190624175158-6a42ef9fe8c2/go.mod h1:g4cOPxcjV0oFq3qwpjSA30LReKD8AoIfwAY9VvG35NY= -github.com/miekg/dns v1.1.15 h1:CSSIDtllwGLMoA6zjdKnaE6Tx6eVUxQ29LUgGetiDCI= -github.com/miekg/dns v1.1.15/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.16 h1:iMEQ/IVHxPTtx2Q07JP/k4CKRvSjiAZjZ0hnhgYEDmE= github.com/miekg/dns v1.1.16/go.mod h1:YNV562EiewvSmpCB6/W4c6yqjK7Z+M/aIS1JHsIVeg8= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod 
h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -271,13 +220,13 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lN github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0= github.com/naoina/toml v0.1.1/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.8.0 h1:VkHVNpR4iVnU8XQR6DBm8BqYjN7CRzw+xKUbVVbbW9w= github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v0.0.0-20190113212917-5533ce8a0da3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo= github.com/onsi/gomega 
v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492 h1:lM6RxxfUMrYL/f8bWEUqdXrANWtrL7Nndbm9iFN0DlU= @@ -286,12 +235,7 @@ github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsq github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/openzipkin-contrib/zipkin-go-opentracing v0.3.5 h1:82Tnq9OJpn+h5xgGpss5/mOv3KXdjtkdorFSOUusjM8= github.com/openzipkin-contrib/zipkin-go-opentracing v0.3.5/go.mod h1:uVHyebswE1cCXr2A73cRM2frx5ld1RJUCJkFNZ90ZiI= -github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= -github.com/openzipkin/zipkin-go v0.1.3/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= -github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= -github.com/openzipkin/zipkin-go-opentracing v0.3.5 h1:nZPvd2EmRKP+NzFdSuxZF/FG4Y4W2gn6ugXliTAu9o0= -github.com/openzipkin/zipkin-go-opentracing v0.3.5/go.mod h1:js2AbwmHW0YD9DwIw2JhQWmbfFi/UnWyYwdVhqbCDOE= -github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/philhofer/fwd v1.0.0 h1:UbZqGr5Y38ApvM/V/jEljVxwocdweyH+vmYvRPBnbqQ= github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= github.com/pierrec/lz4 v2.0.5+incompatible h1:2xWsjqPFWcplujydGg4WmhC/6fZqK42wMM8aXeqhl0I= @@ -301,26 +245,19 @@ github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= github.com/prometheus/client_golang v1.0.0 h1:vrDKnkGzuGvhNAL56c7DBz29ZL+KxnoR0x7enabFceM= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.1.0 h1:BQ53HtBmfOitExawJ6LokA4x8ov/z0SYYb0+HxJfRI8= github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1 h1:K0MGApIoQvMw27RTdJkPbr3JZ7DNbtxQNyi5STVM6Kw= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.6.0 h1:kRhiuYSXR3+uv2IbVbZhUxK5zVD/2pp3Gd2PpvPkpEo= github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= 
-github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.3 h1:CTwfnzjQ+8dS6MhHHu4YswVAD99sL2wjPqP+VkURmKE= @@ -329,21 +266,15 @@ github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhD github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/russross/blackfriday v0.0.0-20170610170232-067529f716f4/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= -github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/cast v1.3.0/go.mod 
h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s= -github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= -github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= @@ -353,17 +284,10 @@ github.com/tinylib/msgp v1.1.0 h1:9fQd+ICuRIu/ue4vxJZu6/LzxN0HwMds2nq/0cFvxHU= github.com/tinylib/msgp v1.1.0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 h1:LnC5Kc/wtumK+WB441p7ynQJzVuNRJiqddSIE3IlSEQ= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= go.etcd.io/bbolt v1.3.2 
h1:Z/90sZLPOeCy2PwprqkFa25PdkusRzaj9P8zm/KNyvk= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.opencensus.io v0.18.1-0.20181204023538-aab39bd6a98b/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= -go.opencensus.io v0.19.1/go.mod h1:gug0GbSHa8Pafr0d2urOSgoXHZ6x/RUlaiT0d9pqb4A= -go.opencensus.io v0.19.2/go.mod h1:NO/8qkisMZLZ1FCsKNqtJPwc8/TaclWyY0B6wcYNg9M= -go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= -go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= @@ -375,7 +299,7 @@ go.uber.org/zap v1.9.1 h1:XCJQEf3W6eZaVwhRBof6ImoYGJSITeKWsyeh3HFu/5o= go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181001203147-e3636079e1a4/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181025213731-e84da0312774/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190123085648-057139ce5d2b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190228161510-8dd112bcdc25/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -387,9 +311,7 @@ golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8U golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp 
v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20181217174547-8f45f776aaf1/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -399,11 +321,11 @@ golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180926154720-4dfa2610cdf3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190206173232-65e2d4e15006/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190328230028-74de082e2cca/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -417,8 +339,8 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190628185345-da137c7871d7 h1:rTIdg5QFRR7XCaK4LCjBiPbx8j4DQRpdYMnGn/bJUEU= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -433,9 +355,6 @@ golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20180928133829-e4b3c5e90611/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys 
v0.0.0-20181218192612-074acd46bca6/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190124100055-b90733256f2e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -448,21 +367,19 @@ golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb h1:fgwFCsaw9buMuxNd6+DQfAuSFqbNiQZpcgJQAgJsK6k= -golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3 h1:4y9KwBHBgBNwDbtu44R5o1fdOCQUEXhbk/P4A9WmJq0= golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/time v0.0.0-20161028155119-f51c12702a4d/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 
h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181219222714-6e267b5cc78e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= @@ -475,26 +392,17 @@ golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190624190245-7f2218787638/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= -google.golang.org/api v0.0.0-20181220000619-583d854617af/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= -google.golang.org/api v0.2.0/go.mod h1:IfRCZScioGtypHNTlz3gFk67J8uePVW7uDTBzXuIkhU= -google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0 h1:9sdfJOzWlkqPltHAuzT2Cp+yrBeY1KRVYgms8soxMwM= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= 
-google.golang.org/api v0.8.0 h1:VGGbLNyPF7dvYHhcUGYBBGCRDDK0RRJAI6KCvo0CL+E= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.9.0 h1:jbyannxz0XFD3zdjgrSUsaJbgpH4eTrkdhRChkHPfO8= google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1 h1:QzqyMA1tlu6CgqCDUtU9V+ZKhLFT2dkJuANu5QaxI3I= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20181219182458-5a97ab628bfb/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -502,23 +410,12 @@ google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRn google.golang.org/genproto v0.0.0-20190626174449-989357319d63/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= google.golang.org/genproto v0.0.0-20190701230453-710ae3a149df h1:k3DT34vxk64+4bD5x+fRy6U0SXxZehzUHRSYUJcKfII= google.golang.org/genproto v0.0.0-20190701230453-710ae3a149df/go.mod 
h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= -google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= -google.golang.org/grpc v1.15.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= -google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= -google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.19.1/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1 h1:j6XxA85m/6txkUCHvzlV5f+HBNl/1r5cZ2A/3IEFOO8= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.22.0 h1:J0UbZOIrCAl+fpTOf8YLs4dJo8L/owV4LYVtAXQoPkw= -google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0 h1:AzbTB6ux+okLTzP8Ru1Xs41C303zdcfEht7MQnYJt5A= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -gopkg.in/DataDog/dd-trace-go.v1 v1.15.0 h1:2LhklnAJsRSelbnBrrE5QuRleRDkmOh2JWxOtIX6yec= -gopkg.in/DataDog/dd-trace-go.v1 v1.15.0/go.mod h1:DVp8HmDh8PuTu2Z0fVVlBsyWaC++fzwVCaGWylTe3tg= -gopkg.in/DataDog/dd-trace-go.v1 v1.16.0 h1:M+pIoj6uFByW3E8KaAaBmsLl3Sma3JzXZ9eo7ffyX5A= -gopkg.in/DataDog/dd-trace-go.v1 v1.16.0/go.mod h1:DVp8HmDh8PuTu2Z0fVVlBsyWaC++fzwVCaGWylTe3tg= gopkg.in/DataDog/dd-trace-go.v1 v1.16.1 h1:Dngw1zun6yTYFHNdzEWBlrJzFA2QJMjSA2sZ4nH2UWo= gopkg.in/DataDog/dd-trace-go.v1 v1.16.1/go.mod h1:DVp8HmDh8PuTu2Z0fVVlBsyWaC++fzwVCaGWylTe3tg= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= @@ -527,6 +424,7 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/mcuadros/go-syslog.v2 v2.2.1/go.mod h1:l5LPIyOOyIdQquNg+oU6Z3524YwrcqEm0aKH+5zpt2U= @@ -540,25 +438,24 @@ gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bl gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20180920025451-e3ad64cb4ed3/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -k8s.io/api v0.0.0-20190313235455-40a48860b5ab h1:DG9A67baNpoeweOy2spF1OWHhnVY5KR7/Ek/+U1lVZc= -k8s.io/api v0.0.0-20190313235455-40a48860b5ab/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA= -k8s.io/apimachinery v0.0.0-20190313205120-d7deff9243b1 h1:IS7K02iBkQXpCeieSiyJjGoLSdVOv2DbPaWHJ+ZtgKg= -k8s.io/apimachinery v0.0.0-20190313205120-d7deff9243b1/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0= -k8s.io/client-go v11.0.0+incompatible 
h1:LBbX2+lOwY9flffWlJM7f1Ct8V2SRNiMRDFeiwnJo9o= -k8s.io/client-go v11.0.0+incompatible/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s= +k8s.io/api v0.0.0-20190620084959-7cf5895f2711 h1:BblVYz/wE5WtBsD/Gvu54KyBUTJMflolzc5I2DTvh50= +k8s.io/api v0.0.0-20190620084959-7cf5895f2711/go.mod h1:TBhBqb1AWbBQbW3XRusr7n7E4v2+5ZY8r8sAMnyFC5A= +k8s.io/apimachinery v0.0.0-20190612205821-1799e75a0719 h1:uV4S5IB5g4Nvi+TBVNf3e9L4wrirlwYJ6w88jUQxTUw= +k8s.io/apimachinery v0.0.0-20190612205821-1799e75a0719/go.mod h1:I4A+glKBHiTgiEjQiCCQfCAIcIMFGt291SmsvcrFzJA= +k8s.io/client-go v0.0.0-20190620085101-78d2af792bab h1:E8Fecph0qbNsAbijJJQryKu4Oi9QTp5cVpjTE+nqg6g= +k8s.io/client-go v0.0.0-20190620085101-78d2af792bab/go.mod h1:E95RaSlHr79aHaX0aGSwcPNfygDiPKOVXdmivCIZT0k= k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v0.3.3 h1:niceAagH1tzskmaie/icWd7ci1wbG7Bf2c6YGcQv+3c= -k8s.io/klog v0.3.3/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v0.3.1/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.4.0 h1:lCJCxf/LIowc2IGS9TPjWDyXY4nOmdGdfcwwDQCOURQ= k8s.io/klog v0.4.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= +k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= k8s.io/kube-openapi v0.0.0-20190306001800-15615b16d372 h1:zia7dTzfEtdiSUxi9cXUDsSQH2xE6igmGKyFn2on/9A= k8s.io/kube-openapi v0.0.0-20190306001800-15615b16d372/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= +k8s.io/utils v0.0.0-20190221042446-c2654d5206da/go.mod h1:8k8uAuAQ0rXslZKaEWd0c3oVhZz7sSzSiPnVZayjIX0= k8s.io/utils v0.0.0-20190529001817-6999998975a7 h1:5UOdmwfY+7XsXvo26XeCDu9GhHJPkO1z8Mcz5AHMnOE= k8s.io/utils v0.0.0-20190529001817-6999998975a7/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= From 55160492cd23b5b6c04ce0d26535e9fbbff4f1e8 Mon Sep 17 00:00:00 2001 From: AllenZMC Date: 
Tue, 27 Aug 2019 21:01:17 +0800 Subject: [PATCH 144/324] fix mis-spelling in request.go (#3213) --- request/request.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/request/request.go b/request/request.go index 1448cad6512..0ce34816cd7 100644 --- a/request/request.go +++ b/request/request.go @@ -240,7 +240,7 @@ func (r *Request) Scrub(reply *dns.Msg) *dns.Msg { // Last ditch attempt to avoid fragmentation, if the size is bigger than the v4/v6 UDP fragmentation // limit and sent via UDP compress it (in the hope we go under that limit). Limits taken from NSD: // - // .., 1480 (EDNS/IPv4), 1220 (EDNS/IPv6), or the advertized EDNS buffer size if that is + // .., 1480 (EDNS/IPv4), 1220 (EDNS/IPv6), or the advertised EDNS buffer size if that is // smaller than the EDNS default. // See: https://open.nlnetlabs.nl/pipermail/nsd-users/2011-November/001278.html if rl > 1480 && r.Family() == 1 { From 364478eb33d76ef83b3a5159a2f62ad5bcf67b02 Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Tue, 27 Aug 2019 13:12:53 +0000 Subject: [PATCH 145/324] release: initial release notes (#3204) * release: initial release notes Core of the release notes. Signed-off-by: Miek Gieben * typo Signed-off-by: Miek Gieben * review Signed-off-by: Miek Gieben * spell check Signed-off-by: Miek Gieben --- notes/coredns-1.7.0.md | 35 +++++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) create mode 100644 notes/coredns-1.7.0.md diff --git a/notes/coredns-1.7.0.md b/notes/coredns-1.7.0.md new file mode 100644 index 00000000000..e6ac5bcfd87 --- /dev/null +++ b/notes/coredns-1.7.0.md @@ -0,0 +1,35 @@ ++++ +title = "CoreDNS-1.7.0 Release" +description = "CoreDNS-1.7.0 Release Notes." +tags = ["Release", "1.7.0", "Notes"] +release = "1.7.0" +date = 2019-08-31T14:35:47+01:00 +author = "coredns" ++++ + +The CoreDNS team has released +[CoreDNS-1.7.0](https://github.com/coredns/coredns/releases/tag/v1.7.0). 
+
+In this release we have deprecated the *federation* plugin that was used in conjunction with the
+*kubernetes* plugin.
+
+Furthermore, a slew of spelling corrections and other minor improvements and polish. **And** three(!)
+new plugins.
+
+# Plugins
+
+* [*acl*](/plugins/acl) blocks queries depending on their source IP address.
+* [*clouddns*](/plugins/clouddns) to enable serving zone data from GCP Cloud DNS.
+* [*sign*](/plugins/sign) that (DNSSEC) signs your zonefiles (in its most basic form).
+
+## Brought to You By
+
+
+## Noteworthy Changes
+
+* plugin/clouddns: Add Google Cloud DNS plugin (https://github.com/coredns/coredns/pull/3011)
+* plugin/federation: Move federation plugin to github.com/coredns/federation (https://github.com/coredns/coredns/pull/3139)
+* plugin/file: close reader for reload (https://github.com/coredns/coredns/pull/3196)
+* plugin/file: respond correctly to IXFR message (https://github.com/coredns/coredns/pull/3177)
+* plugin/k8s_external handle NS records (https://github.com/coredns/coredns/pull/3160)
+* plugin/kubernetes: handle NS records (https://github.com/coredns/coredns/pull/3160)

From 87bd9dec856b743d65d71f90accc4296d767b109 Mon Sep 17 00:00:00 2001
From: Miek Gieben
Date: Tue, 27 Aug 2019 13:54:23 +0000
Subject: [PATCH 146/324] plugin/file: less notify logging spam (#3212)

Say once that we've sent notifies, instead of for every upstream
primary.
Signed-off-by: Miek Gieben --- plugin/file/notify.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/plugin/file/notify.go b/plugin/file/notify.go index ce6a0b09588..83d73ee6f7a 100644 --- a/plugin/file/notify.go +++ b/plugin/file/notify.go @@ -53,10 +53,9 @@ func notify(zone string, to []string) error { } if err := notifyAddr(c, m, t); err != nil { log.Error(err.Error()) - } else { - log.Infof("Sent notify for zone %q to %q", zone, t) } } + log.Infof("Sent notifies for zone %q to %v", zone, to) return nil } From 06b61a17c833cb3637ca044a8756e5a772d3ba73 Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Tue, 27 Aug 2019 16:01:58 +0100 Subject: [PATCH 147/324] update etcd to 3.4.0rc2 (#3214) This moves the entire etcd to go.etcd.io, so that it compiles cleanly when using bazel. Fixes: #3087 Signed-off-by: Miek Gieben --- go.mod | 16 ++-------------- go.sum | 52 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 54 insertions(+), 14 deletions(-) diff --git a/go.mod b/go.mod index 40be2ac30ee..6491318add9 100644 --- a/go.mod +++ b/go.mod @@ -16,23 +16,17 @@ require ( github.com/coredns/federation v0.0.0-20190818181423-e032b096babe github.com/coreos/bbolt v1.3.2 // indirect github.com/coreos/etcd v3.3.15+incompatible - github.com/coreos/go-semver v0.2.0 // indirect github.com/coreos/go-systemd v0.0.0-20190212144455-93d5ec2c7f76 // indirect github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f // indirect github.com/dnstap/golang-dnstap v0.0.0-20170829151710-2cf77a2b5e11 github.com/evanphx/json-patch v4.1.0+incompatible // indirect github.com/farsightsec/golang-framestream v0.0.0-20181102145529-8a0cb8ba8710 - github.com/gogo/protobuf v1.2.1 // indirect github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef // indirect github.com/golang/protobuf v1.3.2 github.com/googleapis/gnostic v0.2.0 // indirect 
github.com/gophercloud/gophercloud v0.0.0-20190307220656-fe1ba5ce12dd // indirect - github.com/grpc-ecosystem/go-grpc-middleware v1.0.0 // indirect - github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect - github.com/grpc-ecosystem/grpc-gateway v1.8.5 // indirect github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 github.com/imdario/mergo v0.3.7 // indirect - github.com/jonboulle/clockwork v0.1.0 // indirect github.com/konsorten/go-windows-terminal-sequences v1.0.2 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.1 github.com/miekg/dns v1.1.16 @@ -45,17 +39,11 @@ require ( github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 github.com/prometheus/common v0.6.0 github.com/sirupsen/logrus v1.4.2 // indirect - github.com/soheilhy/cmux v0.1.4 // indirect - github.com/spf13/pflag v1.0.3 // indirect + github.com/spf13/cobra v0.0.5 // indirect github.com/tinylib/msgp v1.1.0 // indirect github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 // indirect - github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 // indirect - go.etcd.io/bbolt v1.3.2 // indirect - go.uber.org/atomic v1.3.2 // indirect - go.uber.org/multierr v1.1.0 // indirect - go.uber.org/zap v1.9.1 // indirect + go.etcd.io/etcd v0.0.0-20190823073701-67d0c21bb04c // indirect golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 // indirect - golang.org/x/net v0.0.0-20190628185345-da137c7871d7 // indirect golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 // indirect google.golang.org/api v0.9.0 diff --git a/go.sum b/go.sum index fb393ae1e28..8a320f2a3df 100644 --- a/go.sum +++ b/go.sum @@ -43,6 +43,7 @@ github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuy 
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/apache/thrift v0.12.0 h1:pODnxUFNcjP9UTLZGTdeh+j16A8lJbRvD3rOtrk/7bs= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/aws/aws-sdk-go v1.23.8 h1:G/azJoBN0pnhB3B+0eeC4yyVFYIIad6bbzg6wwtImqk= github.com/aws/aws-sdk-go v1.23.8/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= @@ -50,6 +51,7 @@ github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bifurcation/mint v0.0.0-20180715133206-93c51c6ce115/go.mod h1:zVt7zX3K/aDCk9Tj+VM7YymsX66ERvzCJzw8rFCX2JU= github.com/caddyserver/caddy v1.0.3 h1:i9gRhBgvc5ifchwWtSe7pDpsdS9+Q0Rw9oYQmYUTw1w= github.com/caddyserver/caddy v1.0.3/go.mod h1:G+ouvOY32gENkJC+jhgl62TyhvqEsFaDiZ4uw0RzP1E= @@ -57,18 +59,25 @@ github.com/cenkalti/backoff v2.1.1+incompatible h1:tKJnvO2kl0zmb/jA5UKAt4VoEVw1q github.com/cenkalti/backoff v2.1.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cheekybits/genny v0.0.0-20170328200008-9127e812e1e9/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= 
+github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/coredns/federation v0.0.0-20190818181423-e032b096babe h1:ND08lR/TclI9W4dScCwdRESOacCCdF3FkuB5pBIOv1U= github.com/coredns/federation v0.0.0-20190818181423-e032b096babe/go.mod h1:MoqTEFX8GlnKkyq8eBCF94VzkNAOgjdlCJ+Pz/oCLPk= github.com/coreos/bbolt v1.3.2 h1:wZwiHHUieZCquLkDL0B8UhzreNWsPHooDAG3q34zk0s= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/etcd v3.3.15+incompatible h1:+9RjdC18gMxNQVvSiXvObLu29mOFmkgdsB4cRTlV+EE= github.com/coreos/etcd v3.3.15+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-semver v0.2.0 h1:3Jm3tLmsgAYcjC+4Up7hJrFBPr+n7rAqYeSw/SZazuY= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190212144455-93d5ec2c7f76 h1:FE783w8WFh+Rvg+7bZ5g8p7gP4SeVS4AoNwkvazlsBg= github.com/coreos/go-systemd v0.0.0-20190212144455-93d5ec2c7f76/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man v1.0.10/go.mod 
h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -80,6 +89,8 @@ github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQ github.com/dnstap/golang-dnstap v0.0.0-20170829151710-2cf77a2b5e11 h1:m8nX8hsUghn853BJ5qB0lX+VvS6LTJPksWyILFZRYN4= github.com/dnstap/golang-dnstap v0.0.0-20170829151710-2cf77a2b5e11/go.mod h1:s1PfVYYVmTMgCSPtho4LKBDecEHJWtiVDPNv78Z985U= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= +github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/eapache/go-resiliency v1.1.0 h1:1NtRmCAqadE2FN4ZcN6g90TP3uk8cg9rn9eNK2197aU= github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= @@ -93,6 +104,7 @@ github.com/evanphx/json-patch v4.1.0+incompatible h1:K1MDoo4AZ4wU0GIU/fPmtZg7Vpz github.com/evanphx/json-patch v4.1.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/farsightsec/golang-framestream v0.0.0-20181102145529-8a0cb8ba8710 h1:QdyRyGZWLEvJG5Kw3VcVJvhXJ5tZ1MkRgqpJOEZSySM= github.com/farsightsec/golang-framestream v0.0.0-20181102145529-8a0cb8ba8710/go.mod h1:eNde4IQyEiA5br02AouhEHCu3p3UzrCdFR4LuQHklMI= +github.com/fatih/color v1.7.0/go.mod 
h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 h1:BHsljHzVlRcyQhjrss6TZTdY2VfCqZPbv5k3iBFa2ZQ= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= @@ -152,15 +164,20 @@ github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTV github.com/gophercloud/gophercloud v0.0.0-20190126172459-c818fa66e4c8/go.mod h1:3WdhXV3rUYy9p6AUW8d94kr+HS62Y4VL9mBnFxsD8q4= github.com/gophercloud/gophercloud v0.0.0-20190307220656-fe1ba5ce12dd h1:tkA3C/XTk8iACLOlTez37pL+0iGSYkkRGKdXgJ6ZylM= github.com/gophercloud/gophercloud v0.0.0-20190307220656-fe1ba5ce12dd/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= +github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gregjones/httpcache v0.0.0-20170728041850-787624de3eb7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0 h1:Iju5GlWwrvL6UBg4zJJt3btmonfrMlCDdsejg4CZE7c= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4 h1:z53tR0945TRRQO/fLEVPI6SMv7ZflF0TEaTAoU7tOzg= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= 
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.8.5 h1:2+KSC78XiO6Qy0hIjfc1OD9H+hsaJdJlb8Kqsd41CTE= github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.9.5 h1:UImYN5qQ8tuGpGE16ZmjvcTtTw24zw1QAp/SlnNrZhI= +github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 h1:MJG/KsmcqMwFAkh8mTnAwhyKoB+sTAnY4CACC110tbU= github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= @@ -168,11 +185,13 @@ github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47/go.mod h1:/m3 github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.7 h1:Y+UAYTZ7gDEuOfhxKWy+dvb5dRQ6rJjFSdX2HZY1/gI= github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= 
github.com/jimstudt/http-authentication v0.0.0-20140401203705-3eca13d6893a/go.mod h1:wK6yTYYcgjHE1Z1QtXACPDjcFJyBskHEdagmnq3vsP8= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= @@ -204,7 +223,11 @@ github.com/lucas-clemente/aes12 v0.0.0-20171027163421-cd47fb39b79f/go.mod h1:JpH github.com/lucas-clemente/quic-clients v0.1.0/go.mod h1:y5xVIEoObKqULIKivu+gD/LU90pL73bTdtQjPBvtCBk= github.com/lucas-clemente/quic-go v0.10.2/go.mod h1:hvaRS9IHjFLMq76puFJeWNfmn+H70QZ/CXoxqw9bzao= github.com/lucas-clemente/quic-go-certificates v0.0.0-20160823095156-d2f86524cced/go.mod h1:NCcRLrOTZbzhZvixZLlERbJtDtYsmMw8Jc4vS8Z0g58= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/marten-seemann/qtls v0.2.3/go.mod h1:xzjG7avBwGGbdZ8dTGxlBnLArsVKLvwmjgmPuiQEcYk= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mholt/certmagic v0.6.2-0.20190624175158-6a42ef9fe8c2 h1:xKE9kZ5C8gelJC3+BNM6LJs1x21rivK7yxfTZMAuY2s= @@ -213,6 +236,7 @@ github.com/miekg/dns v1.1.16 h1:iMEQ/IVHxPTtx2Q07JP/k4CKRvSjiAZjZ0hnhgYEDmE= github.com/miekg/dns v1.1.16/go.mod h1:YNV562EiewvSmpCB6/W4c6yqjK7Z+M/aIS1JHsIVeg8= github.com/mitchellh/go-homedir v1.1.0 
h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -223,6 +247,7 @@ github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRW github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0= github.com/naoina/toml v0.1.1/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E= +github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.8.0 h1:VkHVNpR4iVnU8XQR6DBm8BqYjN7CRzw+xKUbVVbbW9w= github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= @@ -235,6 +260,7 @@ github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsq github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/openzipkin-contrib/zipkin-go-opentracing v0.3.5 h1:82Tnq9OJpn+h5xgGpss5/mOv3KXdjtkdorFSOUusjM8= github.com/openzipkin-contrib/zipkin-go-opentracing v0.3.5/go.mod h1:uVHyebswE1cCXr2A73cRM2frx5ld1RJUCJkFNZ90ZiI= +github.com/pelletier/go-toml v1.2.0/go.mod 
h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/philhofer/fwd v1.0.0 h1:UbZqGr5Y38ApvM/V/jEljVxwocdweyH+vmYvRPBnbqQ= github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= @@ -266,15 +292,23 @@ github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhD github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/russross/blackfriday v0.0.0-20170610170232-067529f716f4/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= 
+github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= @@ -282,12 +316,22 @@ github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0 github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/tinylib/msgp v1.1.0 h1:9fQd+ICuRIu/ue4vxJZu6/LzxN0HwMds2nq/0cFvxHU= github.com/tinylib/msgp v1.1.0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= +github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 h1:LnC5Kc/wtumK+WB441p7ynQJzVuNRJiqddSIE3IlSEQ= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod 
h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= go.etcd.io/bbolt v1.3.2 h1:Z/90sZLPOeCy2PwprqkFa25PdkusRzaj9P8zm/KNyvk= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk= +go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/etcd v0.0.0-20190823073701-67d0c21bb04c h1:cmCkyKubPxaCrx7tK7JZbgVkzLIxUgsZyfkvnCUgs08= +go.etcd.io/etcd v0.0.0-20190823073701-67d0c21bb04c/go.mod h1:tQYIqsNuGzkF9ncfEtoEX0qkoBhzw6ih5N1xcdGnvek= +go.etcd.io/etcd v3.3.15+incompatible h1:0VpOVCF6EFnJptt8Jh0EWEHO4j2fepyV1fpu9xz/UoQ= +go.etcd.io/etcd v3.3.15+incompatible/go.mod h1:yaeTdrJi5lOmYerz05bd8+V7KubZs8YSFZfzsF9A6aI= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= @@ -297,9 +341,12 @@ go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/zap v1.9.1 h1:XCJQEf3W6eZaVwhRBof6ImoYGJSITeKWsyeh3HFu/5o= go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181001203147-e3636079e1a4/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181025213731-e84da0312774/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod 
h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190123085648-057139ce5d2b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190228161510-8dd112bcdc25/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -338,6 +385,8 @@ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7 h1:rTIdg5QFRR7XCaK4LCjBiPbx8j4DQRpdYMnGn/bJUEU= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813000000-74dc4d7220e7 h1:HOTjhHFecQCpFnEhQ4MAH6Gf7yGklZnqaHvtezTKqnQ= +golang.org/x/net v0.0.0-20190813000000-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -355,6 +404,7 @@ golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20180928133829-e4b3c5e90611/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190124100055-b90733256f2e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys 
v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -375,6 +425,7 @@ golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5f golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/time v0.0.0-20161028155119-f51c12702a4d/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -422,6 +473,7 @@ gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLks gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= From efcc3758ddd186f1b1ae1394d936744b3b185af8 Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Tue, 27 Aug 2019 16:58:35 +0100 Subject: [PATCH 148/324] plugin/etcd: fully move to go.etcd.io (#3215) This is needed as well to make it compile cleanly. Follow up to #3214. 
Manually removed github.com/coreos/etcd/ from go.mod Signed-off-by: Miek Gieben --- go.mod | 3 +-- plugin/etcd/etcd.go | 4 ++-- plugin/etcd/setup.go | 2 +- test/etcd_test.go | 2 +- 4 files changed, 5 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 6491318add9..0488188f4b7 100644 --- a/go.mod +++ b/go.mod @@ -15,7 +15,6 @@ require ( github.com/caddyserver/caddy v1.0.3 github.com/coredns/federation v0.0.0-20190818181423-e032b096babe github.com/coreos/bbolt v1.3.2 // indirect - github.com/coreos/etcd v3.3.15+incompatible github.com/coreos/go-systemd v0.0.0-20190212144455-93d5ec2c7f76 // indirect github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f // indirect github.com/dnstap/golang-dnstap v0.0.0-20170829151710-2cf77a2b5e11 @@ -42,7 +41,7 @@ require ( github.com/spf13/cobra v0.0.5 // indirect github.com/tinylib/msgp v1.1.0 // indirect github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 // indirect - go.etcd.io/etcd v0.0.0-20190823073701-67d0c21bb04c // indirect + go.etcd.io/etcd v0.0.0-20190823073701-67d0c21bb04c golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 // indirect golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 // indirect diff --git a/plugin/etcd/etcd.go b/plugin/etcd/etcd.go index 5c8f9f11089..a39cb0e62b6 100644 --- a/plugin/etcd/etcd.go +++ b/plugin/etcd/etcd.go @@ -15,9 +15,9 @@ import ( "github.com/coredns/coredns/request" "github.com/coredns/coredns/plugin/pkg/upstream" - etcdcv3 "github.com/coreos/etcd/clientv3" - "github.com/coreos/etcd/mvcc/mvccpb" "github.com/miekg/dns" + etcdcv3 "go.etcd.io/etcd/clientv3" + "go.etcd.io/etcd/mvcc/mvccpb" ) const ( diff --git a/plugin/etcd/setup.go b/plugin/etcd/setup.go index 18d3340c569..d7827c875d7 100644 --- a/plugin/etcd/setup.go +++ b/plugin/etcd/setup.go @@ -10,7 +10,7 @@ import 
( "github.com/coredns/coredns/plugin/pkg/upstream" "github.com/caddyserver/caddy" - etcdcv3 "github.com/coreos/etcd/clientv3" + etcdcv3 "go.etcd.io/etcd/clientv3" ) var log = clog.NewWithPlugin("etcd") diff --git a/test/etcd_test.go b/test/etcd_test.go index 2ac165beb37..f47c7206556 100644 --- a/test/etcd_test.go +++ b/test/etcd_test.go @@ -11,8 +11,8 @@ import ( "github.com/coredns/coredns/plugin/etcd" "github.com/coredns/coredns/plugin/etcd/msg" - etcdcv3 "github.com/coreos/etcd/clientv3" "github.com/miekg/dns" + etcdcv3 "go.etcd.io/etcd/clientv3" ) func etcdPlugin() *etcd.Etcd { From df6235995b2d0fdbf499b998da2e9ca91fde962f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E9=99=88=E8=B0=AD=E5=86=9B?= <2799194073@qq.com> Date: Wed, 28 Aug 2019 14:18:33 +0800 Subject: [PATCH 149/324] fix the-the mistake (#3216) --- plugin/kubernetes/ns.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugin/kubernetes/ns.go b/plugin/kubernetes/ns.go index 6d18c0b77f0..4774e176f9a 100644 --- a/plugin/kubernetes/ns.go +++ b/plugin/kubernetes/ns.go @@ -14,7 +14,7 @@ func isDefaultNS(name, zone string) bool { } // nsAddrs returns the A or AAAA records for the CoreDNS service in the cluster. If the service cannot be found, -// it returns a record for the the local address of the machine we're running on. +// it returns a record for the local address of the machine we're running on. 
func (k *Kubernetes) nsAddrs(external bool, zone string) []dns.RR { var ( svcNames []string From 4fca3b0fb0d443b5b86beafea123f757b9ff2d69 Mon Sep 17 00:00:00 2001 From: AllenZMC Date: Wed, 28 Aug 2019 21:34:48 +0800 Subject: [PATCH 150/324] fix mis-spelling in pcap.go (#3222) --- plugin/debug/pcap.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugin/debug/pcap.go b/plugin/debug/pcap.go index 0663f6dce5b..493478a409f 100644 --- a/plugin/debug/pcap.go +++ b/plugin/debug/pcap.go @@ -9,7 +9,7 @@ import ( "github.com/miekg/dns" ) -// Hexdump converts the dns message m to a hex dump Whireshark can import. +// Hexdump converts the dns message m to a hex dump Wireshark can import. // See https://www.wireshark.org/docs/man-pages/text2pcap.html. // This output looks like this: // From 6fdf130b67fee68eb43e1520d27d827389f3ac23 Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Wed, 28 Aug 2019 14:41:11 +0100 Subject: [PATCH 151/324] pkg/log: remove timestamp (#3218) journald timestamps, kubernetes timestamps, syslog timestamps. It seems silly to add our own timestamps to the logging output as these external ones *also* do it. Only when just running coredns this might be weird. Remove the timestamping from pkg/log. Remove test that tested for this. Fixes: #3211 Signed-off-by: Miek Gieben --- plugin/pkg/log/log.go | 18 +++++++----------- plugin/pkg/log/plugin_test.go | 16 ---------------- 2 files changed, 7 insertions(+), 27 deletions(-) diff --git a/plugin/pkg/log/log.go b/plugin/pkg/log/log.go index 3050999afe4..2d8ba78d851 100644 --- a/plugin/pkg/log/log.go +++ b/plugin/pkg/log/log.go @@ -14,7 +14,6 @@ import ( golog "log" "os" "sync" - "time" ) // D controls whether we should output debug logs. If true, we do, once set @@ -41,17 +40,14 @@ func (d *d) Value() bool { return b } -// RFC3339Milli doesn't exist, invent it here. 
-func clock() string { return time.Now().Format("2006-01-02T15:04:05.000Z07:00") } - // logf calls log.Printf prefixed with level. func logf(level, format string, v ...interface{}) { - golog.Print(clock(), level, fmt.Sprintf(format, v...)) + golog.Print(level, fmt.Sprintf(format, v...)) } // log calls log.Print prefixed with level. func log(level string, v ...interface{}) { - golog.Print(clock(), level, fmt.Sprint(v...)) + golog.Print(level, fmt.Sprint(v...)) } // Debug is equivalent to log.Print(), but prefixed with "[DEBUG] ". It only outputs something @@ -102,9 +98,9 @@ func Fatalf(format string, v ...interface{}) { logf(fatal, format, v...); os.Exi func Discard() { golog.SetOutput(ioutil.Discard) } const ( - debug = " [DEBUG] " - err = " [ERROR] " - fatal = " [FATAL] " - info = " [INFO] " - warning = " [WARNING] " + debug = "[DEBUG] " + err = "[ERROR] " + fatal = "[FATAL] " + info = "[INFO] " + warning = "[WARNING] " ) diff --git a/plugin/pkg/log/plugin_test.go b/plugin/pkg/log/plugin_test.go index c023386088c..b24caa48b09 100644 --- a/plugin/pkg/log/plugin_test.go +++ b/plugin/pkg/log/plugin_test.go @@ -19,19 +19,3 @@ func TestPlugins(t *testing.T) { t.Errorf("Expected log to be %s, got %s", info+ts, x) } } - -func TestPluginsDateTime(t *testing.T) { - var f bytes.Buffer - const ts = "test" - golog.SetFlags(0) // Set to 0 because we're doing our own time, with timezone - golog.SetOutput(&f) - - lg := NewWithPlugin("testplugin") - - lg.Info(ts) - // rude check if the date/time is there - str := f.String() - if str[4] != '-' || str[7] != '-' || str[10] != 'T' { - t.Errorf("Expected date got %s...", str[:15]) - } -} From 2cb017928c10d816db7a329b40588b814d1bc4f8 Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Wed, 28 Aug 2019 14:59:00 +0100 Subject: [PATCH 152/324] plugin/etcd: clarify it's not a generic backend (#3220) * plugin/etcd: clarify it's not a generic backend Clarify the docs that this is not a generic backend. Wrap it on 80 columns as well. 
Signed-off-by: Miek Gieben * Update plugin/etcd/README.md Co-Authored-By: Chris O'Haver * Update plugin/etcd/README.md Co-Authored-By: Chris O'Haver --- plugin/etcd/README.md | 47 ++++++++++++++++++++++++++++--------------- 1 file changed, 31 insertions(+), 16 deletions(-) diff --git a/plugin/etcd/README.md b/plugin/etcd/README.md index 2c4afbc1b36..a7f44d53910 100644 --- a/plugin/etcd/README.md +++ b/plugin/etcd/README.md @@ -2,15 +2,19 @@ ## Name -*etcd* - enables reading zone data from an etcd version 3 instance. +*etcd* - enable SkyDNS service discovery from etcd. ## Description +The *etcd* plugin implements the (older) SkyDNS service discovery service. It is *not* suitable as +a generic DNS zone data plugin. Only a subset of DNS record types are implemented, and subdomains +and delegations are not handled at all. + The data in etcd instance has to be encoded as a [message](https://github.com/skynetservices/skydns/blob/2fcff74cdc9f9a7dd64189a447ef27ac354b725f/msg/service.go#L26) -like [SkyDNS](https://github.com/skynetservices/skydns). It should also work just like SkyDNS. +like [SkyDNS](https://github.com/skynetservices/skydns). It works just like SkyDNS. -The etcd plugin makes extensive use of the forward plugin to forward and query other servers in the +The etcd plugin makes extensive use of the *forward* plugin to forward and query other servers in the network. ## Syntax @@ -19,7 +23,7 @@ network. etcd [ZONES...] ~~~ -* **ZONES** zones etcd should be authoritative for. +* **ZONES** zones *etcd* should be authoritative for. The path will default to `/skydns` the local etcd3 proxy (http://localhost:2379). If no zones are specified the block's zone will be used as the zone. @@ -53,17 +57,20 @@ etcd [ZONES...] { is needed. ## Special Behaviour -CoreDNS etcd plugin leverages directory structure to look for related entries. 
For example an entry `/skydns/test/skydns/mx` would have entries like `/skydns/test/skydns/mx/a`, `/skydns/test/skydns/mx/b` and so on. Similarly a directory `/skydns/test/skydns/mx1` will have all `mx1` entries. - -With etcd3, support for [hierarchical keys are dropped](https://coreos.com/etcd/docs/latest/learning/api.html). This means there are no directories but only flat keys with prefixes in etcd3. To accommodate lookups, etcdv3 plugin now does a lookup on prefix `/skydns/test/skydns/mx/` to search for entries like `/skydns/test/skydns/mx/a` etc, and if there is nothing found on `/skydns/test/skydns/mx/`, it looks for `/skydns/test/skydns/mx` to find entries like `/skydns/test/skydns/mx1`. - -This causes two lookups from CoreDNS to etcdv3 in certain cases. -## Migration to `etcdv3` API +The *etcd* plugin leverages directory structure to look for related entries. For example +an entry `/skydns/test/skydns/mx` would have entries like `/skydns/test/skydns/mx/a`, +`/skydns/test/skydns/mx/b` and so on. Similarly a directory `/skydns/test/skydns/mx1` will have all +`mx1` entries. -With CoreDNS release `1.2.0`, you'll need to migrate existing CoreDNS related data (if any) on your etcd server to etcdv3 API. This is because with `etcdv3` support, CoreDNS can't see the data stored to an etcd server using `etcdv2` API. +With etcd3, support for [hierarchical keys are +dropped](https://coreos.com/etcd/docs/latest/learning/api.html). This means there are no directories +but only flat keys with prefixes in etcd3. To accommodate lookups, etcdv3 plugin now does a lookup +on prefix `/skydns/test/skydns/mx/` to search for entries like `/skydns/test/skydns/mx/a` etc, and +if there is nothing found on `/skydns/test/skydns/mx/`, it looks for `/skydns/test/skydns/mx` to +find entries like `/skydns/test/skydns/mx1`. -Refer this [blog by CoreOS team](https://coreos.com/blog/migrating-applications-etcd-v3.html) to migrate to etcdv3 API. 
+This causes two lookups from CoreDNS to etcdv3 in certain cases. ## Examples @@ -102,10 +109,16 @@ etcd skydns.local { endpoint http://localhost:2379 http://localhost:4001 ... ~~~ +Before getting started with these examples, please setup `etcdctl` (with `etcdv3` API) as explained +[here](https://coreos.com/etcd/docs/latest/dev-guide/interacting_v3.html). This will help you to put +sample keys in your etcd server. -Before getting started with these examples, please setup `etcdctl` (with `etcdv3` API) as explained [here](https://coreos.com/etcd/docs/latest/dev-guide/interacting_v3.html). This will help you to put sample keys in your etcd server. - -If you prefer, you can use `curl` to populate the `etcd` server, but with `curl` the endpoint URL depends on the version of `etcd`. For instance, `etcd v3.2` or before uses only [CLIENT-URL]/v3alpha/* while `etcd v3.5` or later uses [CLIENT-URL]/v3/* . Also, Key and Value must be base64 encoded in the JSON payload. With `etcdctl` these details are automatically taken care off. You can check [this document](https://github.com/coreos/etcd/blob/master/Documentation/dev-guide/api_grpc_gateway.md#notes) for details. +If you prefer, you can use `curl` to populate the `etcd` server, but with `curl` the +endpoint URL depends on the version of `etcd`. For instance, `etcd v3.2` or before uses only +[CLIENT-URL]/v3alpha/* while `etcd v3.5` or later uses [CLIENT-URL]/v3/* . Also, Key and Value must +be base64 encoded in the JSON payload. With `etcdctl` these details are automatically taken care +of. You can check [this document](https://github.com/coreos/etcd/blob/master/Documentation/dev-guide/api_grpc_gateway.md#notes) +for details. ### Reverse zones @@ -134,7 +147,9 @@ reverse.skydns.local. ### Zone name as A record -The zone name itself can be used as A record. This behavior can be achieved by writing special entries to the ETCD path of your zone. 
If your zone is named `skydns.local` for example, you can create an `A` record for this zone as follows: +The zone name itself can be used as an `A` record. This behavior can be achieved by writing special +entries to the ETCD path of your zone. If your zone is named `skydns.local` for example, you can +create an `A` record for this zone as follows: ~~~ % etcdctl put /skydns/local/skydns/ '{"host":"1.1.1.1","ttl":60}' From eec24cb0138e74eb63f59521681f3e3b3555d4f0 Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Wed, 28 Aug 2019 16:12:33 +0100 Subject: [PATCH 153/324] notes: rebrand 1.7.0 as 1.6.3 (#3217) We're not making any backward incompat changes so this can just be 1.6.3 Signed-off-by: Miek Gieben --- notes/coredns-1.6.3.md | 58 ++++++++++++++++++++++++++++++++++++++++++ notes/coredns-1.7.0.md | 35 ------------------------- 2 files changed, 58 insertions(+), 35 deletions(-) create mode 100644 notes/coredns-1.6.3.md delete mode 100644 notes/coredns-1.7.0.md diff --git a/notes/coredns-1.6.3.md b/notes/coredns-1.6.3.md new file mode 100644 index 00000000000..8bea15be4ff --- /dev/null +++ b/notes/coredns-1.6.3.md @@ -0,0 +1,58 @@ ++++ +title = "CoreDNS-1.6.3 Release" +description = "CoreDNS-1.6.3 Release Notes." +tags = ["Release", "1.6.3", "Notes"] +release = "1.6.3" +date = 2019-08-31T14:35:47+01:00 +author = "coredns" ++++ + +The CoreDNS team has released +[CoreDNS-1.6.3](https://github.com/coredns/coredns/releases/tag/v1.6.3). + +In this release we have moved the *federation* plugin to +[github.com/coredns/federation](https://github.com/coredns/federation), but it is still fully +functional in this version. In version 1.7.0 we expect to fully deprecate it. + +Furthermore, a slew of spelling corrections and other minor improvements and polish. **And** three(!) +new plugins. + +# Plugins + +* [*acl*](/plugins/acl) blocks queries depending on their source IP address.
+* [*clouddns*](/plugins/clouddns) to enable serving zone data from GCP Cloud DNS. +* [*sign*](/plugins/sign) that (DNSSEC) signs your zonefiles (in its most basic form). + +## Brought to You By + +AllenZMC, +Chris Aniszczyk, +Chris O'Haver, +Cricket Liu, +Guangming Wang, +Julien Garcia Gonzalez, +li mengyang, +Miek Gieben, +Muhammad Falak R Wani, +Palash Nigam, +Sakura, +wwgfhf, +xieyanker, +Xigang Wang, +Yevgeny Pats, +Yong Tang, +zhangguoyan, +陈谭军. + + +## Noteworthy Changes + +* fuzzing: Add Continuous Fuzzing Integration to Fuzzit (https://github.com/coredns/coredns/pull/3093) +* plugin/clouddns: Add Google Cloud DNS plugin (https://github.com/coredns/coredns/pull/3011) +* plugin/federation: Move federation plugin to github.com/coredns/federation (https://github.com/coredns/coredns/pull/3139) +* plugin/file: close reader for reload (https://github.com/coredns/coredns/pull/3196) +* plugin/file: less notify logging spam (https://github.com/coredns/coredns/pull/3212) +* plugin/file: respond correctly to IXFR message (https://github.com/coredns/coredns/pull/3177) +* plugin/{health,ready}: return standardized text for ready and health endpoint (https://github.com/coredns/coredns/pull/3195) +* plugin/k8s_external handle NS records (https://github.com/coredns/coredns/pull/3160) +* plugin/kubernetes: handle NS records (https://github.com/coredns/coredns/pull/3160) diff --git a/notes/coredns-1.7.0.md b/notes/coredns-1.7.0.md deleted file mode 100644 index e6ac5bcfd87..00000000000 --- a/notes/coredns-1.7.0.md +++ /dev/null @@ -1,35 +0,0 @@ -+++ -title = "CoreDNS-1.7.0 Release" -description = "CoreDNS-1.7.0 Release Notes." -tags = ["Release", "1.7.0", "Notes"] -release = "1.7.0" -date = 2019-08-31T14:35:47+01:00 -author = "coredns" -+++ - -The CoreDNS team has released -[CoreDNS-1.7.0](https://github.com/coredns/coredns/releases/tag/v1.7.0). 
- -In this release we have deprecated the *federation* plugin that was used in conjunction with the -*kubernetes* plugin. - -Further more a slew a spelling corrections and other minor improvements and polish. **And** three(!) -new plugins. - -# Plugins - -* [*acl*](/plugins/acl) blocks queries depending on their source IP address. -* [*clouddns*](/plugin/clouddns) to enable serving zone data from GCP Cloud DNS. -* [*sign*](/plugins/sign) that (DNSSEC) signs your zonefiles (in its most basic form). - -## Brought to You By - - -## Noteworthy Changes - -* plugin/clouddns: Add Google Cloud DNS plugin (https://github.com/coredns/coredns/pull/3011) -* plugin/federation: Move federation plugin to github.com/coredns/federation (https://github.com/coredns/coredns/pull/3139) -* plugin/file: close reader for reload (https://github.com/coredns/coredns/pull/3196) -* plugin/file: respond correctly to IXFR message (https://github.com/coredns/coredns/pull/3177) -* plugin/k8s_external handle NS records (https://github.com/coredns/coredns/pull/3160) -* plugin/kubernetes: handle NS records (https://github.com/coredns/coredns/pull/3160) From b8a0b52a5edc05145588598e7a5e2f00b82bb84d Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Thu, 29 Aug 2019 15:41:59 +0100 Subject: [PATCH 154/324] plugin/sign: a plugin that signs zone (#2993) * plugin/sign: a plugin that signs zones Sign is a plugin that signs zone data (on disk). The README.md details what exactly happens and should be accurate with respect to the code. Zones are signed with a CSK, resigning and first time signing is all handled by the *sign* plugin. Logging with a test zone looks something like this: ~~~ txt [INFO] plugin/sign: Signing "miek.nl." because open plugin/sign/testdata/db.miek.nl.signed: no such file or directory [INFO] plugin/sign: Signed "miek.nl." with key tags "59725" in 11.670985ms, saved in "plugin/sign/testdata/db.miek.nl.signed".
Next: 2019-07-20T15:49:06.560Z [INFO] plugin/file: Successfully reloaded zone "miek.nl." in "plugin/sign/testdata/db.miek.nl.signed" with serial 1563636548 [INFO] plugin/sign: Signing "miek.nl." because resign was: 10m0s ago [INFO] plugin/sign: Signed "miek.nl." with key tags "59725" in 2.055895ms, saved in "plugin/sign/testdata/db.miek.nl.signed". Next: 2019-07-20T16:09:06.560Z [INFO] plugin/file: Successfully reloaded zone "miek.nl." in "plugin/sign/testdata/db.miek.nl.signed" with serial 1563637748 ~~~ Signed-off-by: Miek Gieben * Adjust readme and remove timestamps Signed-off-by: Miek Gieben * Comment on the newline Signed-off-by: Miek Gieben * Update plugin/sign/README.md Co-Authored-By: Michael Grosser --- core/dnsserver/zdirectives.go | 1 + core/plugin/zplugin.go | 1 + go.mod | 2 +- go.sum | 8 + plugin.cfg | 1 + plugin/sign/README.md | 161 +++++++++++++ plugin/sign/dnssec.go | 20 ++ plugin/sign/file.go | 93 ++++++++ plugin/sign/file_test.go | 43 ++++ plugin/sign/keys.go | 119 ++++++++++ plugin/sign/log_test.go | 5 + plugin/sign/nsec.go | 41 ++++ plugin/sign/resign_test.go | 42 ++++ plugin/sign/setup.go | 114 +++++++++ plugin/sign/setup_test.go | 75 ++++++ plugin/sign/sign.go | 37 +++ plugin/sign/signer.go | 222 ++++++++++++++++++ plugin/sign/signer_test.go | 102 ++++++++ plugin/sign/testdata/Kmiek.nl.+013+59725.key | 5 + .../sign/testdata/Kmiek.nl.+013+59725.private | 6 + plugin/sign/testdata/db.miek.nl | 17 ++ 21 files changed, 1114 insertions(+), 1 deletion(-) create mode 100644 plugin/sign/README.md create mode 100644 plugin/sign/dnssec.go create mode 100644 plugin/sign/file.go create mode 100644 plugin/sign/file_test.go create mode 100644 plugin/sign/keys.go create mode 100644 plugin/sign/log_test.go create mode 100644 plugin/sign/nsec.go create mode 100644 plugin/sign/resign_test.go create mode 100644 plugin/sign/setup.go create mode 100644 plugin/sign/setup_test.go create mode 100644 plugin/sign/sign.go create mode 100644 plugin/sign/signer.go create 
mode 100644 plugin/sign/signer_test.go create mode 100644 plugin/sign/testdata/Kmiek.nl.+013+59725.key create mode 100644 plugin/sign/testdata/Kmiek.nl.+013+59725.private create mode 100644 plugin/sign/testdata/db.miek.nl diff --git a/core/dnsserver/zdirectives.go b/core/dnsserver/zdirectives.go index b48154448f0..6dfb99226e8 100644 --- a/core/dnsserver/zdirectives.go +++ b/core/dnsserver/zdirectives.go @@ -51,4 +51,5 @@ var Directives = []string{ "erratic", "whoami", "on", + "sign", } diff --git a/core/plugin/zplugin.go b/core/plugin/zplugin.go index e761f858629..d522ed29490 100644 --- a/core/plugin/zplugin.go +++ b/core/plugin/zplugin.go @@ -40,6 +40,7 @@ import ( _ "github.com/coredns/coredns/plugin/root" _ "github.com/coredns/coredns/plugin/route53" _ "github.com/coredns/coredns/plugin/secondary" + _ "github.com/coredns/coredns/plugin/sign" _ "github.com/coredns/coredns/plugin/template" _ "github.com/coredns/coredns/plugin/tls" _ "github.com/coredns/coredns/plugin/trace" diff --git a/go.mod b/go.mod index 0488188f4b7..c2364473751 100644 --- a/go.mod +++ b/go.mod @@ -42,7 +42,7 @@ require ( github.com/tinylib/msgp v1.1.0 // indirect github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 // indirect go.etcd.io/etcd v0.0.0-20190823073701-67d0c21bb04c - golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 // indirect + golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 // indirect google.golang.org/api v0.9.0 diff --git a/go.sum b/go.sum index 8a320f2a3df..82c84388dcb 100644 --- a/go.sum +++ b/go.sum @@ -353,6 +353,8 @@ golang.org/x/crypto v0.0.0-20190228161510-8dd112bcdc25/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto 
v0.0.0-20190418165655-df01cb2cc480/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190618222545-ea8f1a30c443 h1:IcSOAf4PyMp3U3XbIEj1/xJ2BjNN2jWv7JoyOsMxXUU= +golang.org/x/crypto v0.0.0-20190618222545-ea8f1a30c443/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 h1:HuIa8hRrWRSrqYzx1qI49NNxhdi2PrY7gxVSq1JjLDc= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -382,6 +384,8 @@ golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190603091049-60506f45cf65 h1:+rhAzEzT3f4JtomfC371qB+0Ola2caSKcY69NUBZrRQ= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7 h1:rTIdg5QFRR7XCaK4LCjBiPbx8j4DQRpdYMnGn/bJUEU= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -416,6 +420,8 @@ golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190618155005-516e3c20635f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0 h1:HyfiK1WMnHj5FXFXatD+Qs1A/xC2Run6RzeW1SyHxpc= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3 h1:4y9KwBHBgBNwDbtu44R5o1fdOCQUEXhbk/P4A9WmJq0= golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -458,6 +464,8 @@ google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRn google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190611190212-a7e196e89fd3/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= +google.golang.org/genproto v0.0.0-20190626174449-989357319d63 h1:UsSJe9fhWNSz6emfIGPpH5DF23t7ALo2Pf3sC+/hsdg= google.golang.org/genproto v0.0.0-20190626174449-989357319d63/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= google.golang.org/genproto v0.0.0-20190701230453-710ae3a149df h1:k3DT34vxk64+4bD5x+fRy6U0SXxZehzUHRSYUJcKfII= google.golang.org/genproto v0.0.0-20190701230453-710ae3a149df/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= diff --git a/plugin.cfg b/plugin.cfg index 2bd07c55768..ff856f72468 100644 --- a/plugin.cfg +++ b/plugin.cfg @@ -60,3 +60,4 @@ grpc:grpc erratic:erratic whoami:whoami on:github.com/caddyserver/caddy/onevent +sign:sign diff --git a/plugin/sign/README.md b/plugin/sign/README.md new file mode 100644 index 00000000000..4753598d4b5 --- 
/dev/null +++ b/plugin/sign/README.md @@ -0,0 +1,161 @@ +# sign + +## Name + +*sign* - add DNSSEC records to zone files. + +## Description + +The *sign* plugin is used to sign (see RFC 6781) zones. In this process DNSSEC resource records are +added. The signatures that sign the resource record sets have an expiration date; this means the +signing process must be repeated before this expiration date is reached. Otherwise the zone's data +will go BAD (RFC 4035, Section 5.5). The *sign* plugin takes care of this. *Sign* works, but has +a couple of limitations, see the "Bugs" section. + +Only NSEC is supported, *sign* does not support NSEC3. + +*Sign* works in conjunction with the *file* and *auto* plugins; this plugin **signs** the zones +files, *auto* and *file* **serve** the zones *data*. + +For this plugin to work at least one Common Signing Key, (see coredns-keygen(1)) is needed. This key +(or keys) will be used to sign the entire zone. *Sign* does not support the ZSK/KSK split, nor will +it do key or algorithm rollovers - it just signs. + +*Sign* will: + + * (Re)-sign the zone with the CSK(s) when: + + - the last time it was signed is more than 6 days ago. Each zone will have some jitter + applied to the inception date. + + - the signature only has 14 days left before expiring. + + Both these dates are only checked on the SOA's signature(s). + + * Create signatures that have an inception of -3 hours (minus a jitter between 0 and 18 hours) + and an expiration of +32 days for every given DNSKEY. + + * Add or replace *all* apex CDS/CDNSKEY records with the ones derived from the given keys. For + each key two CDS are created one with SHA1 and another with SHA256. + + * Update the SOA's serial number to the *Unix epoch* of when the signing happens. This will + overwrite *any* previous serial number. + +Thus there are two ways that dictate when a zone is signed. Normally every 6 days (plus jitter) it +will be resigned.
If for some reason we fail this check, the 14 days before expiring kicks in. + +Keys are named (following BIND9): `K++.key` and `K++.private`. +The keys **must not** be included in your zone; they will be added by *sign*. These keys can be +generated with `coredns-keygen` or BIND9's `dnssec-keygen`. You don't have to adhere to this naming +scheme, but then you need to name your keys explicitly, see the `keys file` directive. + +A generated zone is written out in a file named `db..signed` in the directory named by the +`directory` directive (which defaults to `/var/lib/coredns`). + +## Syntax + +~~~ +sign DBFILE [ZONES...] { + key file|directory KEY...|DIR... + directory DIR +} +~~~ + +* **DBFILE** the zone database file to read and parse. If the path is relative, the path from the + *root* directive will be prepended to it. +* **ZONES** zones it should be signed for. If empty, the zones from the configuration block are + used. +* `key` specifies the key(s) (there can be multiple) to sign the zone. If `file` is + used the **KEY**'s filenames are used as is. If `directory` is used, *sign* will look in **DIR** + for `K++` files. Any metadata in these files (Activate, Publish, etc.) is + *ignored*. These keys must also be Key Signing Keys (KSK). +* `directory` specifies the **DIR** where CoreDNS should save zones that have been signed. + If not given this defaults to `/var/lib/coredns`. The zones are saved under the name + `db..signed`. If the path is relative, the path from the *root* directive will be prepended + to it. + +Keys can be generated with `coredns-keygen`; to create one for use in the *sign* plugin, use: +`coredns-keygen example.org` or `dnssec-keygen -a ECDSAP256SHA256 -f KSK example.org`. + +## Examples + +Sign the `example.org` zone contained in the file `db.example.org` and write the result to +`./db.example.org.signed` to let the *file* plugin pick it up and serve it.
The keys used +are read from `/etc/coredns/keys/Kexample.org.key` and `/etc/coredns/keys/Kexample.org.private`. + +~~~ txt +example.org { + file db.example.org.signed + + sign db.example.org { + key file /etc/coredns/keys/Kexample.org + directory . + } +} +~~~ + +Running this leads to the following log output (note the timers in this example have been set to +shorter intervals). + +~~~ txt +[WARNING] plugin/file: Failed to open "open /tmp/db.example.org.signed: no such file or directory": trying again in 1m0s +[INFO] plugin/sign: Signing "example.org." because open /tmp/db.example.org.signed: no such file or directory +[INFO] plugin/sign: Successfully signed zone "example.org." in "/tmp/db.example.org.signed" with key tags "59725" and 1564766865 SOA serial, elapsed 9.357933ms, next: 2019-08-02T22:27:45.270Z +[INFO] plugin/file: Successfully reloaded zone "example.org." in "/tmp/db.example.org.signed" with serial 1564766865 +~~~ + +Or use a single zone file for *multiple* zones, note that the **ZONES** are repeated for both plugins. +Also note this outputs *multiple* signed output files. Here we use the default output directory +`/var/lib/coredns`. + +~~~ txt +. 
{ + file /var/lib/coredns/db.example.org.signed example.org + file /var/lib/coredns/db.example.net.signed example.net + sign db.example.org example.org example.net { + key directory /etc/coredns/keys + } +} +~~~ + +This is the same configuration, but the zones are put in the server block, but note that you still +need to specify what file is served for what zone in the *file* plugin: + +~~~ txt +example.org example.net { + file var/lib/coredns/db.example.org.signed example.org + file var/lib/coredns/db.example.net.signed example.net + sign db.example.org { + key directory /etc/coredns/keys + } +} +~~~ + +Be careful to fully list the origins you want to sign, if you don't: + +~~~ txt +example.org example.net { + sign plugin/sign/testdata/db.example.org miek.org { + key file /etc/coredns/keys/Kexample.org + } +} +~~~ + +This will lead to `db.example.org` be signed *twice*, as this entire section is parsed twice because +you have specified the origins `example.org` and `example.net` in the server block. + +Forcibly resigning a zone can be accomplished by removing the signed zone file (CoreDNS will keep on +serving it from memory), and sending SIGUSR1 to the process to make it reload and resign the zone +file. + +## Also See + +The DNSSEC RFCs: RFC 4033, RFC 4034 and RFC 4035. And the BCP on DNSSEC, RFC 6781. Further more the +manual pages coredns-keygen(1) and dnssec-keygen(8). And the *file* plugin's documentation. + +Coredns-keygen can be found at in the coredns-keygen directory. + +## Bugs + +`keys directory` is not implemented. Glue records are currently signed, and no DS records are added +for child zones. 
diff --git a/plugin/sign/dnssec.go b/plugin/sign/dnssec.go new file mode 100644 index 00000000000..a95e08644b4 --- /dev/null +++ b/plugin/sign/dnssec.go @@ -0,0 +1,20 @@ +package sign + +import ( + "github.com/miekg/dns" +) + +func (p Pair) signRRs(rrs []dns.RR, signerName string, ttl, incep, expir uint32) (*dns.RRSIG, error) { + rrsig := &dns.RRSIG{ + Hdr: dns.RR_Header{Rrtype: dns.TypeRRSIG, Ttl: ttl}, + Algorithm: p.Public.Algorithm, + SignerName: signerName, + KeyTag: p.KeyTag, + OrigTtl: ttl, + Inception: incep, + Expiration: expir, + } + + e := rrsig.Sign(p.Private, rrs) + return rrsig, e +} diff --git a/plugin/sign/file.go b/plugin/sign/file.go new file mode 100644 index 00000000000..b1190126df2 --- /dev/null +++ b/plugin/sign/file.go @@ -0,0 +1,93 @@ +package sign + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + + "github.com/coredns/coredns/plugin/file" + "github.com/coredns/coredns/plugin/file/tree" + + "github.com/miekg/dns" +) + +// write writes out the zone file to a temporary file which is then moved into the correct place. +func (s *Signer) write(z *file.Zone) error { + f, err := ioutil.TempFile(s.directory, "signed-") + if err != nil { + return err + } + + if err := write(f, z); err != nil { + f.Close() + return err + } + + f.Close() + return os.Rename(f.Name(), filepath.Join(s.directory, s.signedfile)) +} + +func write(w io.Writer, z *file.Zone) error { + if _, err := io.WriteString(w, z.Apex.SOA.String()); err != nil { + return err + } + w.Write([]byte("\n")) // RR Stringer() method doesn't include newline, which ends the RR in a zone file, write that here. 
+ for _, rr := range z.Apex.SIGSOA { + io.WriteString(w, rr.String()) + w.Write([]byte("\n")) + } + for _, rr := range z.Apex.NS { + io.WriteString(w, rr.String()) + w.Write([]byte("\n")) + } + for _, rr := range z.Apex.SIGNS { + io.WriteString(w, rr.String()) + w.Write([]byte("\n")) + } + err := z.Walk(func(e *tree.Elem, _ map[uint16][]dns.RR) error { + for _, r := range e.All() { + io.WriteString(w, r.String()) + w.Write([]byte("\n")) + } + return nil + }) + return err +} + +// Parse parses the zone in filename and returns a new Zone or an error. This +// is similar to the Parse function in the *file* plugin. However when parsing +// the record types DNSKEY, RRSIG, CDNSKEY and CDS are *not* included in the returned +// zone (if encountered). +func Parse(f io.Reader, origin, fileName string) (*file.Zone, error) { + zp := dns.NewZoneParser(f, dns.Fqdn(origin), fileName) + zp.SetIncludeAllowed(true) + z := file.NewZone(origin, fileName) + seenSOA := false + + for rr, ok := zp.Next(); ok; rr, ok = zp.Next() { + if err := zp.Err(); err != nil { + return nil, err + } + + switch rr.(type) { + case *dns.DNSKEY, *dns.RRSIG, *dns.CDNSKEY, *dns.CDS: + continue + case *dns.SOA: + seenSOA = true + if err := z.Insert(rr); err != nil { + return nil, err + } + default: + if err := z.Insert(rr); err != nil { + return nil, err + } + } + } + if !seenSOA { + return nil, fmt.Errorf("file %q has no SOA record", fileName) + } + + return z, nil +} diff --git a/plugin/sign/file_test.go b/plugin/sign/file_test.go new file mode 100644 index 00000000000..72d2b02ac04 --- /dev/null +++ b/plugin/sign/file_test.go @@ -0,0 +1,43 @@ +package sign + +import ( + "os" + "testing" + + "github.com/miekg/dns" +) + +func TestFileParse(t *testing.T) { + f, err := os.Open("testdata/db.miek.nl") + if err != nil { + t.Fatal(err) + } + z, err := Parse(f, "miek.nl.", "testdata/db.miek.nl") + if err != nil { + t.Fatal(err) + } + s := &Signer{ + directory: ".", + signedfile: "db.miek.nl.test", + } + + 
s.write(z) + defer os.Remove("db.miek.nl.test") + + f, err = os.Open("db.miek.nl.test") + if err != nil { + t.Fatal(err) + } + z, err = Parse(f, "miek.nl.", "db.miek.nl.test") + if err != nil { + t.Fatal(err) + } + if x := z.Apex.SOA.Header().Name; x != "miek.nl." { + t.Errorf("Expected SOA name to be %s, got %s", x, "miek.nl.") + } + apex, _ := z.Search("miek.nl.") + key := apex.Type(dns.TypeDNSKEY) + if key != nil { + t.Errorf("Expected no DNSKEYs, but got %d", len(key)) + } +} diff --git a/plugin/sign/keys.go b/plugin/sign/keys.go new file mode 100644 index 00000000000..346175be04f --- /dev/null +++ b/plugin/sign/keys.go @@ -0,0 +1,119 @@ +package sign + +import ( + "crypto" + "crypto/ecdsa" + "crypto/rsa" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "strings" + + "github.com/coredns/coredns/core/dnsserver" + + "github.com/caddyserver/caddy" + "github.com/miekg/dns" + "golang.org/x/crypto/ed25519" +) + +// Pair holds DNSSEC key information, both the public and private components are stored here. +type Pair struct { + Public *dns.DNSKEY + KeyTag uint16 + Private crypto.Signer +} + +// keyParse reads the public and private key from disk. 
+func keyParse(c *caddy.Controller) ([]Pair, error) { + if !c.NextArg() { + return nil, c.ArgErr() + } + pairs := []Pair{} + config := dnsserver.GetConfig(c) + + switch c.Val() { + case "file": + ks := c.RemainingArgs() + if len(ks) == 0 { + return nil, c.ArgErr() + } + for _, k := range ks { + base := k + // Kmiek.nl.+013+26205.key, handle .private or without extension: Kmiek.nl.+013+26205 + if strings.HasSuffix(k, ".key") { + base = k[:len(k)-4] + } + if strings.HasSuffix(k, ".private") { + base = k[:len(k)-8] + } + if !filepath.IsAbs(base) && config.Root != "" { + base = filepath.Join(config.Root, base) + } + + pair, err := readKeyPair(base+".key", base+".private") + if err != nil { + return nil, err + } + pairs = append(pairs, pair) + } + case "directory": + return nil, fmt.Errorf("directory: not implemented") + } + + return pairs, nil +} + +func readKeyPair(public, private string) (Pair, error) { + rk, err := os.Open(public) + if err != nil { + return Pair{}, err + } + b, err := ioutil.ReadAll(rk) + if err != nil { + return Pair{}, err + } + dnskey, err := dns.NewRR(string(b)) + if err != nil { + return Pair{}, err + } + if _, ok := dnskey.(*dns.DNSKEY); !ok { + return Pair{}, fmt.Errorf("RR in %q is not a DNSKEY: %d", public, dnskey.Header().Rrtype) + } + ksk := dnskey.(*dns.DNSKEY).Flags&(1<<8) == (1<<8) && dnskey.(*dns.DNSKEY).Flags&1 == 1 + if !ksk { + return Pair{}, fmt.Errorf("DNSKEY in %q is not a CSK/KSK", public) + } + + rp, err := os.Open(private) + if err != nil { + return Pair{}, err + } + privkey, err := dnskey.(*dns.DNSKEY).ReadPrivateKey(rp, private) + if err != nil { + return Pair{}, err + } + switch signer := privkey.(type) { + case *ecdsa.PrivateKey: + return Pair{Public: dnskey.(*dns.DNSKEY), KeyTag: dnskey.(*dns.DNSKEY).KeyTag(), Private: signer}, nil + case *ed25519.PrivateKey: + return Pair{Public: dnskey.(*dns.DNSKEY), KeyTag: dnskey.(*dns.DNSKEY).KeyTag(), Private: signer}, nil + case *rsa.PrivateKey: + return Pair{Public: 
dnskey.(*dns.DNSKEY), KeyTag: dnskey.(*dns.DNSKEY).KeyTag(), Private: signer}, nil + default: + return Pair{}, fmt.Errorf("unsupported algorithm %s", signer) + } +} + +// keyTag returns the key tags of the keys in ps as a formatted string. +func keyTag(ps []Pair) string { + if len(ps) == 0 { + return "" + } + s := "" + for _, p := range ps { + s += strconv.Itoa(int(p.KeyTag)) + "," + } + return s[:len(s)-1] +} diff --git a/plugin/sign/log_test.go b/plugin/sign/log_test.go new file mode 100644 index 00000000000..2726cd179f0 --- /dev/null +++ b/plugin/sign/log_test.go @@ -0,0 +1,5 @@ +package sign + +import clog "github.com/coredns/coredns/plugin/pkg/log" + +func init() { clog.Discard() } diff --git a/plugin/sign/nsec.go b/plugin/sign/nsec.go new file mode 100644 index 00000000000..d726ade72d1 --- /dev/null +++ b/plugin/sign/nsec.go @@ -0,0 +1,41 @@ +package sign + +import ( + "sort" + + "github.com/coredns/coredns/plugin/file" + "github.com/coredns/coredns/plugin/file/tree" + + "github.com/miekg/dns" +) + +// names returns the elements of the zone in nsec order. If the returned boolean is true there were +// no other apex records than SOA and NS, which are stored separately. +func names(origin string, z *file.Zone) ([]string, bool) { + // if there are no apex records other than NS and SOA we'll miss the origin + // in this list. Check the first element and if not origin prepend it. + n := []string{} + z.Walk(func(e *tree.Elem, _ map[uint16][]dns.RR) error { + n = append(n, e.Name()) + return nil + }) + if len(n) == 0 { + return nil, false + } + if n[0] != origin { + n = append([]string{origin}, n...) + return n, true + } + return n, false +} + +// NSEC returns an NSEC record according to name, next, ttl and bitmap. Note that the bitmap is sorted before use. 
+func NSEC(name, next string, ttl uint32, bitmap []uint16) *dns.NSEC { + sort.Slice(bitmap, func(i, j int) bool { return bitmap[i] < bitmap[j] }) + + return &dns.NSEC{ + Hdr: dns.RR_Header{Name: name, Ttl: ttl, Rrtype: dns.TypeNSEC, Class: dns.ClassINET}, + NextDomain: next, + TypeBitMap: bitmap, + } +} diff --git a/plugin/sign/resign_test.go b/plugin/sign/resign_test.go new file mode 100644 index 00000000000..e2571cb5b6d --- /dev/null +++ b/plugin/sign/resign_test.go @@ -0,0 +1,42 @@ +package sign + +import ( + "strings" + "testing" + "time" +) + +func TestResignInception(t *testing.T) { + then := time.Date(2019, 7, 18, 22, 50, 0, 0, time.UTC) + // signed yesterday + zr := strings.NewReader(`miek.nl. 1800 IN RRSIG SOA 13 2 1800 20190808191936 20190717161936 59725 miek.nl. eU6gI1OkSEbyt`) + if x := resign(zr, then); x != nil { + t.Errorf("Expected RRSIG to be valid for %s, got invalid: %s", then.Format(timeFmt), x) + } + + // inception starts after this date. + zr = strings.NewReader(`miek.nl. 1800 IN RRSIG SOA 13 2 1800 20190808191936 20190731161936 59725 miek.nl. eU6gI1OkSEbyt`) + if x := resign(zr, then); x == nil { + t.Errorf("Expected RRSIG to be invalid for %s, got valid", then.Format(timeFmt)) + } +} + +func TestResignExpire(t *testing.T) { + then := time.Date(2019, 7, 18, 22, 50, 0, 0, time.UTC) + // expires tomorrow + zr := strings.NewReader(`miek.nl. 1800 IN RRSIG SOA 13 2 1800 20190717191936 20190717161936 59725 miek.nl. eU6gI1OkSEbyt`) + if x := resign(zr, then); x == nil { + t.Errorf("Expected RRSIG to be invalid for %s, got valid", then.Format(timeFmt)) + } + // expire too far away + zr = strings.NewReader(`miek.nl. 1800 IN RRSIG SOA 13 2 1800 20190731191936 20190717161936 59725 miek.nl. eU6gI1OkSEbyt`) + if x := resign(zr, then); x != nil { + t.Errorf("Expected RRSIG to be valid for %s, got invalid: %s", then.Format(timeFmt), x) + } + + // expired yesterday + zr = strings.NewReader(`miek.nl. 
1800 IN RRSIG SOA 13 2 1800 20190721191936 20190717161936 59725 miek.nl. eU6gI1OkSEbyt`) + if x := resign(zr, then); x == nil { + t.Errorf("Expected RRSIG to be invalid for %s, got valid", then.Format(timeFmt)) + } +} diff --git a/plugin/sign/setup.go b/plugin/sign/setup.go new file mode 100644 index 00000000000..6cc3b711dcc --- /dev/null +++ b/plugin/sign/setup.go @@ -0,0 +1,114 @@ +package sign + +import ( + "fmt" + "math/rand" + "path/filepath" + "time" + + "github.com/coredns/coredns/core/dnsserver" + "github.com/coredns/coredns/plugin" + + "github.com/caddyserver/caddy" +) + +func init() { + caddy.RegisterPlugin("sign", caddy.Plugin{ + ServerType: "dns", + Action: setup, + }) +} + +func setup(c *caddy.Controller) error { + sign, err := parse(c) + if err != nil { + return plugin.Error("sign", err) + } + + c.OnStartup(sign.OnStartup) + c.OnStartup(func() error { + for _, signer := range sign.signers { + go signer.refresh(DurationRefreshHours) + } + return nil + }) + c.OnShutdown(func() error { + for _, signer := range sign.signers { + close(signer.stop) + } + return nil + }) + + // Don't call AddPlugin, *sign* is not a plugin. 
+ return nil +} + +func parse(c *caddy.Controller) (*Sign, error) { + sign := &Sign{} + config := dnsserver.GetConfig(c) + + for c.Next() { + if !c.NextArg() { + return nil, c.ArgErr() + } + dbfile := c.Val() + if !filepath.IsAbs(dbfile) && config.Root != "" { + dbfile = filepath.Join(config.Root, dbfile) + } + + origins := make([]string, len(c.ServerBlockKeys)) + copy(origins, c.ServerBlockKeys) + args := c.RemainingArgs() + if len(args) > 0 { + origins = args + } + for i := range origins { + origins[i] = plugin.Host(origins[i]).Normalize() + } + + signers := make([]*Signer, len(origins)) + for i := range origins { + signers[i] = &Signer{ + dbfile: dbfile, + origin: plugin.Host(origins[i]).Normalize(), + jitter: time.Duration(float32(DurationJitter) * rand.Float32()), + directory: "/var/lib/coredns", + stop: make(chan struct{}), + signedfile: fmt.Sprintf("db.%ssigned", origins[i]), // origins[i] is a fqdn, so it ends with a dot, hence %ssigned. + } + } + + for c.NextBlock() { + switch c.Val() { + case "key": + pairs, err := keyParse(c) + if err != nil { + return sign, err + } + for i := range signers { + for _, p := range pairs { + p.Public.Header().Name = signers[i].origin + } + signers[i].keys = append(signers[i].keys, pairs...) + } + case "directory": + dir := c.RemainingArgs() + if len(dir) == 0 || len(dir) > 1 { + return sign, fmt.Errorf("can only be one argument after %q", "directory") + } + if !filepath.IsAbs(dir[0]) && config.Root != "" { + dir[0] = filepath.Join(config.Root, dir[0]) + } + for i := range signers { + signers[i].directory = dir[0] + signers[i].signedfile = fmt.Sprintf("db.%ssigned", signers[i].origin) + } + default: + return nil, c.Errf("unknown property '%s'", c.Val()) + } + } + sign.signers = append(sign.signers, signers...) 
+ } + + return sign, nil +} diff --git a/plugin/sign/setup_test.go b/plugin/sign/setup_test.go new file mode 100644 index 00000000000..ce25720a0cb --- /dev/null +++ b/plugin/sign/setup_test.go @@ -0,0 +1,75 @@ +package sign + +import ( + "testing" + + "github.com/caddyserver/caddy" +) + +func TestParse(t *testing.T) { + tests := []struct { + input string + shouldErr bool + exp *Signer + }{ + {`sign testdata/db.miek.nl miek.nl { + key file testdata/Kmiek.nl.+013+59725 + }`, + false, + &Signer{ + keys: []Pair{}, + origin: "miek.nl.", + dbfile: "testdata/db.miek.nl", + directory: "/var/lib/coredns", + signedfile: "db.miek.nl.signed", + }, + }, + {`sign testdata/db.miek.nl example.org { + key file testdata/Kmiek.nl.+013+59725 + directory testdata + }`, + false, + &Signer{ + keys: []Pair{}, + origin: "example.org.", + dbfile: "testdata/db.miek.nl", + directory: "testdata", + signedfile: "db.example.org.signed", + }, + }, + // errors + {`sign db.example.org { + key file /etc/coredns/keys/Kexample.org + }`, + true, + nil, + }, + } + for i, tc := range tests { + c := caddy.NewTestController("dns", tc.input) + sign, err := parse(c) + + if err == nil && tc.shouldErr { + t.Fatalf("Test %d expected errors, but got no error", i) + } + if err != nil && !tc.shouldErr { + t.Fatalf("Test %d expected no errors, but got '%v'", i, err) + } + if tc.shouldErr { + continue + } + signer := sign.signers[0] + if x := signer.origin; x != tc.exp.origin { + t.Errorf("Test %d expected %s as origin, got %s", i, tc.exp.origin, x) + } + if x := signer.dbfile; x != tc.exp.dbfile { + t.Errorf("Test %d expected %s as dbfile, got %s", i, tc.exp.dbfile, x) + } + if x := signer.directory; x != tc.exp.directory { + t.Errorf("Test %d expected %s as directory, got %s", i, tc.exp.directory, x) + } + if x := signer.signedfile; x != tc.exp.signedfile { + t.Errorf("Test %d expected %s as signedfile, got %s", i, tc.exp.signedfile, x) + } + } +} diff --git a/plugin/sign/sign.go b/plugin/sign/sign.go new 
file mode 100644 index 00000000000..ebe4522e2b6 --- /dev/null +++ b/plugin/sign/sign.go @@ -0,0 +1,37 @@ +// Package sign implements a zone signer as a plugin. +package sign + +import ( + "path/filepath" + "time" +) + +// Sign contains signers that sign the zones files. +type Sign struct { + signers []*Signer +} + +// OnStartup scans all signers and signs or resigns zones if needed. +func (s *Sign) OnStartup() error { + for _, signer := range s.signers { + why := signer.resign() + if why == nil { + log.Infof("Skipping signing zone %q in %q: signatures are valid", signer.origin, filepath.Join(signer.directory, signer.signedfile)) + continue + } + go signAndLog(signer, why) + } + return nil +} + +// Various duration constants for signing of the zones. +const ( + DurationExpireDays = 7 * 24 * time.Hour // max time allowed before expiration + DurationResignDays = 6 * 24 * time.Hour // if the last sign happenend this long ago, sign again + DurationSignatureExpireDays = 32 * 24 * time.Hour // sign for 32 days + DurationRefreshHours = 5 * time.Hour // check zones every 5 hours + DurationJitter = -18 * time.Hour // default max jitter + DurationSignatureInceptionHours = -3 * time.Hour // -(2+1) hours, be sure to catch daylight saving time and such, jitter is substracted +) + +const timeFmt = "2006-01-02T15:04:05.000Z07:00" diff --git a/plugin/sign/signer.go b/plugin/sign/signer.go new file mode 100644 index 00000000000..a9a5f2e5aed --- /dev/null +++ b/plugin/sign/signer.go @@ -0,0 +1,222 @@ +package sign + +import ( + "fmt" + "io" + "os" + "path/filepath" + "time" + + "github.com/coredns/coredns/plugin/file" + "github.com/coredns/coredns/plugin/file/tree" + clog "github.com/coredns/coredns/plugin/pkg/log" + + "github.com/miekg/dns" +) + +var log = clog.NewWithPlugin("sign") + +// Signer holds the data needed to sign a zone file. 
+type Signer struct { + keys []Pair + origin string + dbfile string + directory string + jitter time.Duration + + signedfile string + stop chan struct{} + + expiration uint32 + inception uint32 + ttl uint32 +} + +// Sign signs a zone file according to the parameters in s. +func (s *Signer) Sign(now time.Time) (*file.Zone, error) { + rd, err := os.Open(s.dbfile) + if err != nil { + return nil, err + } + + z, err := Parse(rd, s.origin, s.dbfile) + if err != nil { + return nil, err + } + + s.inception, s.expiration = lifetime(now, s.jitter) + + s.ttl = z.Apex.SOA.Header().Ttl + z.Apex.SOA.Serial = uint32(now.Unix()) + + for _, pair := range s.keys { + pair.Public.Header().Ttl = s.ttl // set TTL on key so it matches the RRSIG. + z.Insert(pair.Public) + z.Insert(pair.Public.ToDS(dns.SHA1)) + z.Insert(pair.Public.ToDS(dns.SHA256)) + z.Insert(pair.Public.ToDS(dns.SHA1).ToCDS()) + z.Insert(pair.Public.ToDS(dns.SHA256).ToCDS()) + z.Insert(pair.Public.ToCDNSKEY()) + } + + names, apex := names(s.origin, z) + ln := len(names) + + var nsec *dns.NSEC + if apex { + nsec = NSEC(s.origin, names[(ln+1)%ln], s.ttl, []uint16{dns.TypeSOA, dns.TypeNS, dns.TypeRRSIG, dns.TypeNSEC}) + z.Insert(nsec) + } + + for _, pair := range s.keys { + rrsig, err := pair.signRRs([]dns.RR{z.Apex.SOA}, s.origin, s.ttl, s.inception, s.expiration) + if err != nil { + return nil, err + } + z.Insert(rrsig) + // NS apex may not be set if RR's have been discarded because the origin doesn't match. + if len(z.Apex.NS) > 0 { + rrsig, err = pair.signRRs(z.Apex.NS, s.origin, s.ttl, s.inception, s.expiration) + if err != nil { + return nil, err + } + z.Insert(rrsig) + } + if apex { + rrsig, err = pair.signRRs([]dns.RR{nsec}, s.origin, s.ttl, s.inception, s.expiration) + if err != nil { + return nil, err + } + z.Insert(rrsig) + } + } + + // We are walking the tree in the same direction, so names[] can be used here to indicated the next element. 
+ i := 1 + err = z.Walk(func(e *tree.Elem, zrrs map[uint16][]dns.RR) error { + if !apex && e.Name() == s.origin { + nsec := NSEC(e.Name(), names[(ln+i)%ln], s.ttl, append(e.Types(), dns.TypeNS, dns.TypeSOA, dns.TypeNSEC, dns.TypeRRSIG)) + z.Insert(nsec) + } else { + nsec := NSEC(e.Name(), names[(ln+i)%ln], s.ttl, append(e.Types(), dns.TypeNSEC, dns.TypeRRSIG)) + z.Insert(nsec) + } + + for t, rrs := range zrrs { + if t == dns.TypeRRSIG { + continue + } + for _, pair := range s.keys { + rrsig, err := pair.signRRs(rrs, s.origin, s.ttl, s.inception, s.expiration) + if err != nil { + return err + } + e.Insert(rrsig) + } + } + i++ + return nil + }) + return z, err +} + +// resign checks if the signed zone exists, or needs resigning. +func (s *Signer) resign() error { + signedfile := filepath.Join(s.directory, s.signedfile) + rd, err := os.Open(signedfile) + if err != nil && os.IsNotExist(err) { + return err + } + + now := time.Now().UTC() + return resign(rd, now) +} + +// resign will scan rd and check the signature on the SOA record. We will resign on the basis +// of 2 conditions: +// * either the inception is more than 6 days ago, or +// * we only have 1 week left on the signature +// +// All SOA signatures will be checked. If the SOA isn't found in the first 100 +// records, we will resign the zone. +func resign(rd io.Reader, now time.Time) (why error) { + zp := dns.NewZoneParser(rd, ".", "resign") + zp.SetIncludeAllowed(true) + i := 0 + + for rr, ok := zp.Next(); ok; rr, ok = zp.Next() { + if err := zp.Err(); err != nil { + return err + } + + switch x := rr.(type) { + case *dns.RRSIG: + if x.TypeCovered != dns.TypeSOA { + continue + } + incep, _ := time.Parse("20060102150405", dns.TimeToString(x.Inception)) + // If too long ago, resign. 
+ if now.Sub(incep) >= 0 && now.Sub(incep) > DurationResignDays { + return fmt.Errorf("inception %q was more than: %s ago from %s: %s", incep.Format(timeFmt), DurationResignDays, now.Format(timeFmt), now.Sub(incep)) + } + // Inception hasn't even start yet. + if now.Sub(incep) < 0 { + return fmt.Errorf("inception %q date is in the future: %s", incep.Format(timeFmt), now.Sub(incep)) + } + + expire, _ := time.Parse("20060102150405", dns.TimeToString(x.Expiration)) + if expire.Sub(now) < DurationExpireDays { + return fmt.Errorf("expiration %q is less than: %s away from %s: %s", expire.Format(timeFmt), DurationExpireDays, now.Format(timeFmt), expire.Sub(now)) + } + } + i++ + if i > 100 { + // 100 is a random number. A SOA record should be the first in the zonefile, but RFC 1035 doesn't actually mandate this. So it could + // be 3rd or even later. The number 100 looks crazy high enough that it will catch all weird zones, but not high enough to keep the CPU + // busy with parsing all the time. + return fmt.Errorf("no SOA RRSIG found in first 100 records") + } + } + + return nil +} + +func signAndLog(s *Signer, why error) { + now := time.Now().UTC() + z, err := s.Sign(now) + log.Infof("Signing %q because %s", s.origin, why) + if err != nil { + log.Warningf("Error signing %q with key tags %q in %s: %s, next: %s", s.origin, keyTag(s.keys), time.Since(now), err, now.Add(DurationRefreshHours).Format(timeFmt)) + return + } + + if err := s.write(z); err != nil { + log.Warningf("Error signing %q: failed to move zone file into place: %s", s.origin, err) + return + } + log.Infof("Successfully signed zone %q in %q with key tags %q and %d SOA serial, elapsed %f, next: %s", s.origin, filepath.Join(s.directory, s.signedfile), keyTag(s.keys), z.Apex.SOA.Serial, time.Since(now).Seconds(), now.Add(DurationRefreshHours).Format(timeFmt)) +} + +// refresh checks every val if some zones need to be resigned. 
+func (s *Signer) refresh(val time.Duration) { + tick := time.NewTicker(val) + defer tick.Stop() + for { + select { + case <-s.stop: + return + case <-tick.C: + why := s.resign() + if why == nil { + continue + } + signAndLog(s, why) + } + } +} + +func lifetime(now time.Time, jitter time.Duration) (uint32, uint32) { + incep := uint32(now.Add(DurationSignatureInceptionHours).Add(jitter).Unix()) + expir := uint32(now.Add(DurationSignatureExpireDays).Unix()) + return incep, expir +} diff --git a/plugin/sign/signer_test.go b/plugin/sign/signer_test.go new file mode 100644 index 00000000000..6ba94247d41 --- /dev/null +++ b/plugin/sign/signer_test.go @@ -0,0 +1,102 @@ +package sign + +import ( + "io/ioutil" + "os" + "testing" + "time" + + "github.com/caddyserver/caddy" + "github.com/miekg/dns" +) + +func TestSign(t *testing.T) { + input := `sign testdata/db.miek.nl miek.nl { + key file testdata/Kmiek.nl.+013+59725 + directory testdata + }` + c := caddy.NewTestController("dns", input) + sign, err := parse(c) + if err != nil { + t.Fatal(err) + } + if len(sign.signers) != 1 { + t.Fatalf("Expected 1 signer, got %d", len(sign.signers)) + } + z, err := sign.signers[0].Sign(time.Now().UTC()) + if err != nil { + t.Error(err) + } + + apex, _ := z.Search("miek.nl.") + if x := apex.Type(dns.TypeDS); len(x) != 2 { + t.Errorf("Expected %d DS records, got %d", 2, len(x)) + } + if x := apex.Type(dns.TypeCDS); len(x) != 2 { + t.Errorf("Expected %d CDS records, got %d", 2, len(x)) + } + if x := apex.Type(dns.TypeCDNSKEY); len(x) != 1 { + t.Errorf("Expected %d CDNSKEY record, got %d", 1, len(x)) + } + if x := apex.Type(dns.TypeDNSKEY); len(x) != 1 { + t.Errorf("Expected %d DNSKEY record, got %d", 1, len(x)) + } +} + +func TestSignApexZone(t *testing.T) { + apex := `$TTL 30M +$ORIGIN example.org. +@ IN SOA linode miek.miek.nl. 
( 1282630060 4H 1H 7D 4H ) + IN NS linode +` + if err := ioutil.WriteFile("db.apex-test.example.org", []byte(apex), 0644); err != nil { + t.Fatal(err) + } + defer os.Remove("db.apex-test.example.org") + input := `sign db.apex-test.example.org example.org { + key file testdata/Kmiek.nl.+013+59725 + directory testdata + }` + c := caddy.NewTestController("dns", input) + sign, err := parse(c) + if err != nil { + t.Fatal(err) + } + z, err := sign.signers[0].Sign(time.Now().UTC()) + if err != nil { + t.Error(err) + } + + el, _ := z.Search("example.org.") + nsec := el.Type(dns.TypeNSEC) + if len(nsec) != 1 { + t.Errorf("Expected 1 NSEC for %s, got %d", "example.org.", len(nsec)) + } + if x := nsec[0].(*dns.NSEC).NextDomain; x != "example.org." { + t.Errorf("Expected NSEC NextDomain %s, got %s", "example.org.", x) + } + if x := nsec[0].(*dns.NSEC).TypeBitMap; len(x) != 8 { + t.Errorf("Expected NSEC bitmap to be %d elements, got %d", 8, x) + } + if x := nsec[0].(*dns.NSEC).TypeBitMap; x[7] != dns.TypeCDNSKEY { + t.Errorf("Expected NSEC bitmap element 6 to be %d, got %d", dns.TypeCDNSKEY, x[7]) + } + if x := nsec[0].(*dns.NSEC).TypeBitMap; x[5] != dns.TypeDNSKEY { + t.Errorf("Expected NSEC bitmap element 5 to be %d, got %d", dns.TypeDNSKEY, x[5]) + } + dnskey := el.Type(dns.TypeDNSKEY) + if x := dnskey[0].Header().Ttl; x != 1800 { + t.Errorf("Expected DNSKEY TTL to be %d, got %d", 1800, x) + } + sigs := el.Type(dns.TypeRRSIG) + for _, s := range sigs { + if s.(*dns.RRSIG).TypeCovered == dns.TypeDNSKEY { + if s.(*dns.RRSIG).OrigTtl != dnskey[0].Header().Ttl { + t.Errorf("Expected RRSIG original TTL to match DNSKEY TTL, but %d != %d", s.(*dns.RRSIG).OrigTtl, dnskey[0].Header().Ttl) + } + if s.(*dns.RRSIG).SignerName != dnskey[0].Header().Name { + t.Errorf("Expected RRSIG signer name to match DNSKEY ownername, but %s != %s", s.(*dns.RRSIG).SignerName, dnskey[0].Header().Name) + } + } + } +} diff --git a/plugin/sign/testdata/Kmiek.nl.+013+59725.key 
b/plugin/sign/testdata/Kmiek.nl.+013+59725.key new file mode 100644 index 00000000000..b3e3654e38f --- /dev/null +++ b/plugin/sign/testdata/Kmiek.nl.+013+59725.key @@ -0,0 +1,5 @@ +; This is a key-signing key, keyid 59725, for miek.nl. +; Created: 20190709192036 (Tue Jul 9 20:20:36 2019) +; Publish: 20190709192036 (Tue Jul 9 20:20:36 2019) +; Activate: 20190709192036 (Tue Jul 9 20:20:36 2019) +miek.nl. IN DNSKEY 257 3 13 sfzRg5nDVxbeUc51su4MzjgwpOpUwnuu81SlRHqJuXe3SOYOeypR69tZ 52XLmE56TAmPHsiB8Rgk+NTpf0o1Cw== diff --git a/plugin/sign/testdata/Kmiek.nl.+013+59725.private b/plugin/sign/testdata/Kmiek.nl.+013+59725.private new file mode 100644 index 00000000000..2545ed9a9ee --- /dev/null +++ b/plugin/sign/testdata/Kmiek.nl.+013+59725.private @@ -0,0 +1,6 @@ +Private-key-format: v1.3 +Algorithm: 13 (ECDSAP256SHA256) +PrivateKey: rm7EdHRca//6xKpJzeoLt/mrfgQnltJ0WpQGtOG59yo= +Created: 20190709192036 +Publish: 20190709192036 +Activate: 20190709192036 diff --git a/plugin/sign/testdata/db.miek.nl b/plugin/sign/testdata/db.miek.nl new file mode 100644 index 00000000000..4041b1b5ef4 --- /dev/null +++ b/plugin/sign/testdata/db.miek.nl @@ -0,0 +1,17 @@ +$TTL 30M +$ORIGIN miek.nl. +@ IN SOA linode.atoom.net. miek.miek.nl. ( 1282630060 4H 1H 7D 4H ) + IN NS linode.atoom.net. + IN MX 1 aspmx.l.google.com. + IN AAAA 2a01:7e00::f03c:91ff:fe79:234c + IN DNSKEY 257 3 13 sfzRg5nDVxbeUc51su4MzjgwpOpUwnuu81SlRHqJuXe3SOYOeypR69tZ52XLmE56TAmPHsiB8Rgk+NTpf0o1Cw== + +a IN AAAA 2a01:7e00::f03c:91ff:fe79:234c +www IN CNAME a + + +bla IN NS ns1.bla.com. +ns3.blaaat.miek.nl. IN AAAA ::1 ; non-glue, should be signed. +; in baliwick nameserver that requires glue, should not be signed +bla IN NS ns2.bla.miek.nl. +ns2.bla.miek.nl. 
IN A 127.0.0.1 From 94930d20ea241fddb5fa1668a08112dc1acc900d Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Fri, 30 Aug 2019 13:47:27 +0100 Subject: [PATCH 155/324] plugin/file: rework outgoing axfr (#3227) * plugin/file: rework outgoing axfr Signed-off-by: Miek Gieben * Fix test Signed-off-by: Miek Gieben * Actually properly test xfr Signed-off-by: Miek Gieben * Fix test Signed-off-by: Miek Gieben --- plugin/auto/watcher_test.go | 10 ++++----- plugin/file/reload_test.go | 16 ++++++++++---- plugin/file/xfr.go | 43 +++++++++++++++++++++++-------------- plugin/file/zone.go | 35 +++++++++++------------------- test/auto_test.go | 18 ++++++++++------ 5 files changed, 69 insertions(+), 53 deletions(-) diff --git a/plugin/auto/watcher_test.go b/plugin/auto/watcher_test.go index a7013448b63..be2a0d48b17 100644 --- a/plugin/auto/watcher_test.go +++ b/plugin/auto/watcher_test.go @@ -30,12 +30,12 @@ func TestWatcher(t *testing.T) { a.Walk() - // example.org and example.com should exist - if x := len(a.Zones.Z["example.org."].All()); x != 4 { - t.Fatalf("Expected 4 RRs, got %d", x) + // example.org and example.com should exist, we have 3 apex rrs and 1 "real" record. All() returns the non-apex ones. + if x := len(a.Zones.Z["example.org."].All()); x != 1 { + t.Fatalf("Expected 1 RRs, got %d", x) } - if x := len(a.Zones.Z["example.com."].All()); x != 4 { - t.Fatalf("Expected 4 RRs, got %d", x) + if x := len(a.Zones.Z["example.com."].All()); x != 1 { + t.Fatalf("Expected 1 RRs, got %d", x) } // Now remove one file, rescan and see if it's gone. diff --git a/plugin/file/reload_test.go b/plugin/file/reload_test.go index 196565cac95..f9e544372c2 100644 --- a/plugin/file/reload_test.go +++ b/plugin/file/reload_test.go @@ -48,8 +48,12 @@ func TestZoneReload(t *testing.T) { t.Fatalf("Failed to lookup, got %d", res) } - if len(z.All()) != 5 { - t.Fatalf("Expected 5 RRs, got %d", len(z.All())) + rrs, err := z.ApexIfDefined() // all apex records. 
+ if err != nil { + t.Fatal(err) + } + if len(rrs) != 5 { + t.Fatalf("Expected 5 RRs, got %d", len(rrs)) } if err := ioutil.WriteFile(fileName, []byte(reloadZone2Test), 0644); err != nil { t.Fatalf("Failed to write new zone data: %s", err) @@ -57,8 +61,12 @@ func TestZoneReload(t *testing.T) { // Could still be racy, but we need to wait a bit for the event to be seen time.Sleep(1 * time.Second) - if len(z.All()) != 3 { - t.Fatalf("Expected 3 RRs, got %d", len(z.All())) + rrs, err = z.ApexIfDefined() + if err != nil { + t.Fatal(err) + } + if len(rrs) != 3 { + t.Fatalf("Expected 3 RRs, got %d", len(rrs)) } } diff --git a/plugin/file/xfr.go b/plugin/file/xfr.go index f5f803d11f9..659c5c9c3e6 100644 --- a/plugin/file/xfr.go +++ b/plugin/file/xfr.go @@ -6,6 +6,7 @@ import ( "sync" "github.com/coredns/coredns/plugin" + "github.com/coredns/coredns/plugin/file/tree" "github.com/coredns/coredns/request" "github.com/miekg/dns" @@ -35,8 +36,9 @@ func (x Xfr) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (in } } - records := x.All() - if len(records) == 0 { + // get soa and apex + apex, err := x.ApexIfDefined() + if err != nil { return dns.RcodeServerFailure, nil } @@ -49,23 +51,34 @@ func (x Xfr) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (in wg.Done() }() - j, l := 0, 0 - records = append(records, records[0]) // add closing SOA to the end - log.Infof("Outgoing transfer of %d records of zone %s to %s started with %d SOA serial", len(records), x.origin, state.IP(), x.SOASerialIfDefined()) - for i, r := range records { - l += dns.Len(r) - if l > transferLength { - ch <- &dns.Envelope{RR: records[j:i]} - l = 0 - j = i + rrs := []dns.RR{} + l := len(apex) + + ch <- &dns.Envelope{RR: apex} + + x.Walk(func(e *tree.Elem, _ map[uint16][]dns.RR) error { + rrs = append(rrs, e.All()...) 
+ if len(rrs) > 500 { + ch <- &dns.Envelope{RR: rrs} + l += len(rrs) + rrs = []dns.RR{} } + return nil + }) + + if len(rrs) > 0 { + ch <- &dns.Envelope{RR: rrs} + l += len(rrs) + rrs = []dns.RR{} } - if j < len(records) { - ch <- &dns.Envelope{RR: records[j:]} - } + + ch <- &dns.Envelope{RR: []dns.RR{apex[0]}} // closing SOA. + l++ + close(ch) // Even though we close the channel here, we still have wg.Wait() // to wait before we can return and close the connection. + log.Infof("Outgoing transfer of %d records of zone %s to %s done with %d SOA serial", l, x.origin, state.IP(), apex[0].(*dns.SOA).Serial) return dns.RcodeSuccess, nil } @@ -103,5 +116,3 @@ func (x Xfr) ServeIxfr(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (i } return dns.RcodeServerFailure, nil } - -const transferLength = 1000 // Start a new envelop after message reaches this size in bytes. Intentionally small to test multi envelope parsing. diff --git a/plugin/file/zone.go b/plugin/file/zone.go index 27c95177495..62720abb490 100644 --- a/plugin/file/zone.go +++ b/plugin/file/zone.go @@ -154,36 +154,27 @@ func (z *Zone) TransferAllowed(state request.Request) bool { return false } -// All returns all records from the zone, the first record will be the SOA record, -// optionally followed by all RRSIG(SOA)s. -func (z *Zone) All() []dns.RR { - records := []dns.RR{} +// ApexIfDefined returns the apex nodes from z. The SOA record is the first record, if it does not exist, an error is returned. +func (z *Zone) ApexIfDefined() ([]dns.RR, error) { z.RLock() - allNodes := z.Tree.All() - z.RUnlock() - - for _, a := range allNodes { - records = append(records, a.All()...) + defer z.RUnlock() + if z.Apex.SOA == nil { + return nil, fmt.Errorf("no SOA") } - z.RLock() - if len(z.Apex.SIGNS) > 0 { - records = append(z.Apex.SIGNS, records...) + rrs := []dns.RR{z.Apex.SOA} + + if len(z.Apex.SIGSOA) > 0 { + rrs = append(rrs, z.Apex.SIGSOA...) } if len(z.Apex.NS) > 0 { - records = append(z.Apex.NS, records...) 
+ rrs = append(rrs, z.Apex.NS...) } - - if len(z.Apex.SIGSOA) > 0 { - records = append(z.Apex.SIGSOA, records...) + if len(z.Apex.SIGNS) > 0 { + rrs = append(rrs, z.Apex.SIGNS...) } - if z.Apex.SOA != nil { - z.RUnlock() - return append([]dns.RR{z.Apex.SOA}, records...) - } - z.RUnlock() - return records + return rrs, nil } // NameFromRight returns the labels from the right, staring with the diff --git a/test/auto_test.go b/test/auto_test.go index 4d9b70a1c54..07e2af12ddc 100644 --- a/test/auto_test.go +++ b/test/auto_test.go @@ -129,9 +129,9 @@ func TestAutoAXFR(t *testing.T) { t.Fatalf("Could not get CoreDNS serving instance: %s", err) } - udp, _ := CoreDNSServerPorts(i, 0) - if udp == "" { - t.Fatal("Could not get UDP listening port") + _, tcp := CoreDNSServerPorts(i, 0) + if tcp == "" { + t.Fatal("Could not get TCP listening port") } defer i.Stop() @@ -142,14 +142,20 @@ func TestAutoAXFR(t *testing.T) { time.Sleep(1100 * time.Millisecond) // wait for it to be picked up + tr := new(dns.Transfer) m := new(dns.Msg) m.SetAxfr("example.org.") - resp, err := dns.Exchange(m, udp) + c, err := tr.In(m, tcp) if err != nil { t.Fatal("Expected to receive reply, but didn't") } - if len(resp.Answer) != 5 { - t.Fatalf("Expected response with %d RRs, got %d", 5, len(resp.Answer)) + l := 0 + for e := range c { + l += len(e.RR) + } + + if l != 5 { + t.Fatalf("Expected response with %d RRs, got %d", 5, l) } } From c466003a943fac7dd78b25b672922a1a0944b59a Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Fri, 30 Aug 2019 15:12:17 +0100 Subject: [PATCH 156/324] startup: add logo (#3230) * startup: add logo As discussed in #3225, lets add a little logo. This PR incorperates this in the startup text. It also removes the logging output as this outputs identical lines of text, but for no clear reason. This means --quiet now means "no output on startup at all". 
Currently it looks like this: ~~~ .:1053 ______ ____ _ _______ / ____/___ ________ / __ \/ | / / ___/ CoreDNS-1.6.2 / / / __ \/ ___/ _ \/ / / / |/ /\__ \ linux/amd64, go1.12.9, / /___/ /_/ / / / __/ /_/ / /| /___/ / \____/\____/_/ \___/_____/_/ |_//____/ ~~~ We have 2 lines extra on the right if we need to print more (no ideas come to mind currently). Signed-off-by: Miek Gieben * Add distinct marker for grep and cut purposes Signed-off-by: Miek Gieben --- coremain/logo.go | 14 ++++++++++++++ coremain/run.go | 17 ++++++----------- 2 files changed, 20 insertions(+), 11 deletions(-) create mode 100644 coremain/logo.go diff --git a/coremain/logo.go b/coremain/logo.go new file mode 100644 index 00000000000..09db8de4287 --- /dev/null +++ b/coremain/logo.go @@ -0,0 +1,14 @@ +package coremain + +// This is CoreDNS' ascii LOGO, nothing fancy. It's generated with: +// figlet -f slant CoreDNS +// We're printing the logo line by line, hence splitting it up. +var logo = []string{ + ` ______ ____ _ _______`, + ` / ____/___ ________ / __ \/ | / / ___/`, + ` / / / __ \/ ___/ _ \/ / / / |/ /\__ \ `, + `/ /___/ /_/ / / / __/ /_/ / /| /___/ / `, + `\____/\____/_/ \___/_____/_/ |_//____/ `, +} + +const marker = "~ " diff --git a/coremain/run.go b/coremain/run.go index 5f3b610bd90..508c74c622a 100644 --- a/coremain/run.go +++ b/coremain/run.go @@ -11,7 +11,6 @@ import ( "strings" "github.com/coredns/coredns/core/dnsserver" - clog "github.com/coredns/coredns/plugin/pkg/log" "github.com/caddyserver/caddy" ) @@ -82,7 +81,6 @@ func Run() { mustLogFatal(err) } - logVersion() if !dnsserver.Quiet { showVersion() } @@ -141,24 +139,21 @@ func defaultLoader(serverType string) (caddy.Input, error) { }, nil } -// logVersion logs the version that is starting. -func logVersion() { - clog.Info(versionString()) - clog.Info(releaseString()) -} - -// showVersion prints the version that is starting. +// showVersion prints the version that is starting. 
We print our logo on the left. func showVersion() { + fmt.Println(logo[0]) fmt.Print(versionString()) fmt.Print(releaseString()) if devBuild && gitShortStat != "" { fmt.Printf("%s\n%s\n", gitShortStat, gitFilesModified) } + fmt.Println(logo[3]) + fmt.Println(logo[4]) } // versionString returns the CoreDNS version as a string. func versionString() string { - return fmt.Sprintf("%s-%s\n", caddy.AppName, caddy.AppVersion) + return fmt.Sprintf("%s\t%s%s-%s\n", logo[1], marker, caddy.AppName, caddy.AppVersion) } // releaseString returns the release information related to CoreDNS version: @@ -166,7 +161,7 @@ func versionString() string { // e.g., // linux/amd64, go1.8.3, a6d2d7b5 func releaseString() string { - return fmt.Sprintf("%s/%s, %s, %s\n", runtime.GOOS, runtime.GOARCH, runtime.Version(), GitCommit) + return fmt.Sprintf("%s\t%s%s/%s, %s, %s\n", logo[2], marker, runtime.GOOS, runtime.GOARCH, runtime.Version(), GitCommit) } // setVersion figures out the version information From 25d85338e49019ba0af1aa40084bf76a6345117f Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Fri, 30 Aug 2019 15:58:25 +0100 Subject: [PATCH 157/324] doc update: run Makefile.doc (#3232) Add the new plugins ones: clouddns and sign. Remove federation from it. 
Signed-off-by: Miek Gieben --- man/coredns-any.7 | 4 +- man/coredns-auto.7 | 4 +- man/coredns-autopath.7 | 4 +- man/coredns-bind.7 | 4 +- man/coredns-cache.7 | 4 +- man/coredns-cancel.7 | 4 +- man/coredns-chaos.7 | 6 +- man/coredns-clouddns.7 | 100 +++++++++++++++++ man/coredns-debug.7 | 4 +- man/coredns-dnssec.7 | 4 +- man/coredns-dnstap.7 | 4 +- man/coredns-erratic.7 | 4 +- man/coredns-errors.7 | 4 +- man/coredns-etcd.7 | 58 ++++++---- man/coredns-federation.7 | 54 --------- man/coredns-file.7 | 2 +- man/coredns-forward.7 | 4 +- man/coredns-grpc.7 | 4 +- man/coredns-health.7 | 6 +- man/coredns-hosts.7 | 2 +- man/coredns-import.7 | 4 +- man/coredns-k8s_external.7 | 20 ++-- man/coredns-loadbalance.7 | 4 +- man/coredns-log.7 | 4 +- man/coredns-loop.7 | 4 +- man/coredns-metadata.7 | 6 +- man/coredns-metrics.7 | 4 +- man/coredns-nsid.7 | 4 +- man/coredns-pprof.7 | 4 +- man/coredns-ready.7 | 6 +- man/coredns-reload.7 | 4 +- man/coredns-rewrite.7 | 4 +- man/coredns-root.7 | 4 +- man/coredns-secondary.7 | 4 +- man/coredns-sign.7 | 220 +++++++++++++++++++++++++++++++++++++ man/coredns-template.7 | 4 +- man/coredns-tls.7 | 4 +- man/coredns-trace.7 | 4 +- man/coredns-whoami.7 | 4 +- 39 files changed, 436 insertions(+), 156 deletions(-) create mode 100644 man/coredns-clouddns.7 delete mode 100644 man/coredns-federation.7 create mode 100644 man/coredns-sign.7 diff --git a/man/coredns-any.7 b/man/coredns-any.7 index 286849750ff..2fc03ec09a7 100644 --- a/man/coredns-any.7 +++ b/man/coredns-any.7 @@ -1,5 +1,5 @@ -.\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-ANY" 7 "July 2019" "CoreDNS" "CoreDNS Plugins" +.\" Generated by Mmark Markdown Processer - mmark.miek.nl +.TH "COREDNS-ANY" 7 "August 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-auto.7 b/man/coredns-auto.7 index 4dfd7758e24..fcd67317550 100644 --- a/man/coredns-auto.7 +++ b/man/coredns-auto.7 @@ -1,5 +1,5 @@ -.\" Generated by Mmark Markdown Processer - mmark.nl -.TH 
"COREDNS-AUTO" 7 "July 2019" "CoreDNS" "CoreDNS Plugins" +.\" Generated by Mmark Markdown Processer - mmark.miek.nl +.TH "COREDNS-AUTO" 7 "August 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-autopath.7 b/man/coredns-autopath.7 index 8211407f2e0..e96540e65de 100644 --- a/man/coredns-autopath.7 +++ b/man/coredns-autopath.7 @@ -1,5 +1,5 @@ -.\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-AUTOPATH" 7 "July 2019" "CoreDNS" "CoreDNS Plugins" +.\" Generated by Mmark Markdown Processer - mmark.miek.nl +.TH "COREDNS-AUTOPATH" 7 "August 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-bind.7 b/man/coredns-bind.7 index 67fbeec5c41..92a098962f7 100644 --- a/man/coredns-bind.7 +++ b/man/coredns-bind.7 @@ -1,5 +1,5 @@ -.\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-BIND" 7 "June 2019" "CoreDNS" "CoreDNS Plugins" +.\" Generated by Mmark Markdown Processer - mmark.miek.nl +.TH "COREDNS-BIND" 7 "August 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-cache.7 b/man/coredns-cache.7 index daa773bf53f..a229de73b40 100644 --- a/man/coredns-cache.7 +++ b/man/coredns-cache.7 @@ -1,5 +1,5 @@ -.\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-CACHE" 7 "July 2019" "CoreDNS" "CoreDNS Plugins" +.\" Generated by Mmark Markdown Processer - mmark.miek.nl +.TH "COREDNS-CACHE" 7 "August 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-cancel.7 b/man/coredns-cancel.7 index a15bc490c75..29480a9339f 100644 --- a/man/coredns-cancel.7 +++ b/man/coredns-cancel.7 @@ -1,5 +1,5 @@ -.\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-CANCEL" 7 "July 2019" "CoreDNS" "CoreDNS Plugins" +.\" Generated by Mmark Markdown Processer - mmark.miek.nl +.TH "COREDNS-CANCEL" 7 "August 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-chaos.7 b/man/coredns-chaos.7 index e55f42a4dc2..72bb0b2f1b6 100644 --- a/man/coredns-chaos.7 
+++ b/man/coredns-chaos.7 @@ -1,5 +1,5 @@ -.\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-CHAOS" 7 "July 2019" "CoreDNS" "CoreDNS Plugins" +.\" Generated by Mmark Markdown Processer - mmark.miek.nl +.TH "COREDNS-CHAOS" 7 "August 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP @@ -8,7 +8,7 @@ .SH "DESCRIPTION" .PP This is useful for retrieving version or author information from the server by querying a TXT record -for a special domainname in the CH class. +for a special domain name in the CH class. .SH "SYNTAX" .PP diff --git a/man/coredns-clouddns.7 b/man/coredns-clouddns.7 new file mode 100644 index 00000000000..625af666a4a --- /dev/null +++ b/man/coredns-clouddns.7 @@ -0,0 +1,100 @@ +.\" Generated by Mmark Markdown Processer - mmark.miek.nl +.TH "COREDNS-CLOUDDNS" 7 "August 2019" "CoreDNS" "CoreDNS Plugins" + +.SH "NAME" +.PP +\fIclouddns\fP - enables serving zone data from GCP Cloud DNS. + +.SH "DESCRIPTION" +.PP +The \fIclouddns\fP plugin is useful for serving zones from resource record +sets in GCP Cloud DNS. This plugin supports all Google Cloud DNS +records +\[la]https://cloud.google.com/dns/docs/overview#supported_dns_record_types\[ra]. This plugin can +be used when CoreDNS is deployed on GCP or elsewhere. Note that this plugin accesses the resource +records through the Google Cloud API. For records in a privately hosted zone, it is not necessary to +place CoreDNS and this plugin in the associated VPC network. In fact the private hosted zone could +be created without any associated VPC and this plugin could still access the resource records under +the hosted zone. + +.SH "SYNTAX" +.PP +.RS + +.nf +clouddns [ZONE:PROJECT\_ID:HOSTED\_ZONE\_NAME...] { + credentials [FILENAME] + fallthrough [ZONES...] +} + +.fi +.RE + +.IP \(bu 4 +\fBZONE\fP the name of the domain to be accessed. When there are multiple zones with overlapping +domains (private vs. public hosted zone), CoreDNS does the lookup in the given order here. 
+Therefore, for a non-existing resource record, SOA response will be from the rightmost zone. +.IP \(bu 4 +\fBPROJECT_ID\fP the project ID of the Google Cloud project. +.IP \(bu 4 +\fBHOSTED\fIZONE\fPNAME\fP the name of the hosted zone that contains the resource record sets to be +accessed. +.IP \(bu 4 +\fB\fCcredentials\fR is used for reading the credential file. +.IP \(bu 4 +\fBFILENAME\fP GCP credentials file path (normally a .json file). +.IP \(bu 4 +\fB\fCfallthrough\fR If zone matches and no record can be generated, pass request to the next plugin. +If \fB[ZONES...]\fP is omitted, then fallthrough happens for all zones for which the plugin is +authoritative. If specific zones are listed (for example \fB\fCin-addr.arpa\fR and \fB\fCip6.arpa\fR), then +only queries for those zones will be subject to fallthrough. +.IP \(bu 4 +\fBZONES\fP zones it should be authoritative for. If empty, the zones from the configuration block + + +.SH "EXAMPLES" +.PP +Enable clouddns with implicit GCP credentials and resolve CNAMEs via 10.0.0.1: + +.PP +.RS + +.nf +\&. { + clouddns example.org.:gcp\-example\-project:example\-zone + forward . 10.0.0.1 +} + +.fi +.RE + +.PP +Enable clouddns with fallthrough: + +.PP +.RS + +.nf +\&. { + clouddns example.org.:gcp\-example\-project:example\-zone clouddns example.com.:gcp\-example\-project:example\-zone\-2 { + fallthrough example.gov. + } +} + +.fi +.RE + +.PP +Enable clouddns with multiple hosted zones with the same domain: + +.PP +.RS + +.nf +\&. 
{ + clouddns example.org.:gcp\-example\-project:example\-zone example.com.:gcp\-example\-project:other\-example\-zone +} + +.fi +.RE + diff --git a/man/coredns-debug.7 b/man/coredns-debug.7 index ce1da734ce3..b4f857a6e55 100644 --- a/man/coredns-debug.7 +++ b/man/coredns-debug.7 @@ -1,5 +1,5 @@ -.\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-DEBUG" 7 "July 2019" "CoreDNS" "CoreDNS Plugins" +.\" Generated by Mmark Markdown Processer - mmark.miek.nl +.TH "COREDNS-DEBUG" 7 "August 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-dnssec.7 b/man/coredns-dnssec.7 index 012afb6d12f..373923a0999 100644 --- a/man/coredns-dnssec.7 +++ b/man/coredns-dnssec.7 @@ -1,5 +1,5 @@ -.\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-DNSSEC" 7 "June 2019" "CoreDNS" "CoreDNS Plugins" +.\" Generated by Mmark Markdown Processer - mmark.miek.nl +.TH "COREDNS-DNSSEC" 7 "August 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-dnstap.7 b/man/coredns-dnstap.7 index 1477199e9cf..c0e95412eba 100644 --- a/man/coredns-dnstap.7 +++ b/man/coredns-dnstap.7 @@ -1,5 +1,5 @@ -.\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-DNSTAP" 7 "June 2019" "CoreDNS" "CoreDNS Plugins" +.\" Generated by Mmark Markdown Processer - mmark.miek.nl +.TH "COREDNS-DNSTAP" 7 "August 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-erratic.7 b/man/coredns-erratic.7 index 5c025b3bac9..782a976fa18 100644 --- a/man/coredns-erratic.7 +++ b/man/coredns-erratic.7 @@ -1,5 +1,5 @@ -.\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-ERRATIC" 7 "July 2019" "CoreDNS" "CoreDNS Plugins" +.\" Generated by Mmark Markdown Processer - mmark.miek.nl +.TH "COREDNS-ERRATIC" 7 "August 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-errors.7 b/man/coredns-errors.7 index e148d5f1b20..8a159468c39 100644 --- a/man/coredns-errors.7 +++ b/man/coredns-errors.7 @@ -1,5 +1,5 @@ -.\" 
Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-ERRORS" 7 "June 2019" "CoreDNS" "CoreDNS Plugins" +.\" Generated by Mmark Markdown Processer - mmark.miek.nl +.TH "COREDNS-ERRORS" 7 "August 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-etcd.7 b/man/coredns-etcd.7 index 11ffb385b33..57d8a2e5c1e 100644 --- a/man/coredns-etcd.7 +++ b/man/coredns-etcd.7 @@ -1,20 +1,25 @@ -.\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-ETCD" 7 "July 2019" "CoreDNS" "CoreDNS Plugins" +.\" Generated by Mmark Markdown Processer - mmark.miek.nl +.TH "COREDNS-ETCD" 7 "August 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP -\fIetcd\fP - enables reading zone data from an etcd version 3 instance. +\fIetcd\fP - enable SkyDNS service discovery from etcd. .SH "DESCRIPTION" +.PP +The \fIetcd\fP plugin implements the (older) SkyDNS service discovery service. It is \fInot\fP suitable as +a generic DNS zone data plugin. Only a subset of DNS record types are implemented, and subdomains +and delegations are not handled at all. + .PP The data in etcd instance has to be encoded as a message \[la]https://github.com/skynetservices/skydns/blob/2fcff74cdc9f9a7dd64189a447ef27ac354b725f/msg/service.go#L26\[ra] like SkyDNS -\[la]https://github.com/skynetservices/skydns\[ra]. It should also work just like SkyDNS. +\[la]https://github.com/skynetservices/skydns\[ra]. It works just like SkyDNS. .PP -The etcd plugin makes extensive use of the forward plugin to forward and query other servers in the +The etcd plugin makes extensive use of the \fIforward\fP plugin to forward and query other servers in the network. .SH "SYNTAX" @@ -28,7 +33,7 @@ etcd [ZONES...] .RE .IP \(bu 4 -\fBZONES\fP zones etcd should be authoritative for. +\fBZONES\fP zones \fIetcd\fP should be authoritative for. .PP @@ -86,23 +91,23 @@ is needed. .SH "SPECIAL BEHAVIOUR" .PP -CoreDNS etcd plugin leverages directory structure to look for related entries. 
For example an entry \fB\fC/skydns/test/skydns/mx\fR would have entries like \fB\fC/skydns/test/skydns/mx/a\fR, \fB\fC/skydns/test/skydns/mx/b\fR and so on. Similarly a directory \fB\fC/skydns/test/skydns/mx1\fR will have all \fB\fCmx1\fR entries. +The \fIetcd\fP plugin leverages directory structure to look for related entries. For example +an entry \fB\fC/skydns/test/skydns/mx\fR would have entries like \fB\fC/skydns/test/skydns/mx/a\fR, +\fB\fC/skydns/test/skydns/mx/b\fR and so on. Similarly a directory \fB\fC/skydns/test/skydns/mx1\fR will have all +\fB\fCmx1\fR entries. .PP -With etcd3, support for hierarchical keys are dropped -\[la]https://coreos.com/etcd/docs/latest/learning/api.html\[ra]. This means there are no directories but only flat keys with prefixes in etcd3. To accommodate lookups, etcdv3 plugin now does a lookup on prefix \fB\fC/skydns/test/skydns/mx/\fR to search for entries like \fB\fC/skydns/test/skydns/mx/a\fR etc, and if there is nothing found on \fB\fC/skydns/test/skydns/mx/\fR, it looks for \fB\fC/skydns/test/skydns/mx\fR to find entries like \fB\fC/skydns/test/skydns/mx1\fR. +With etcd3, support for hierarchical keys are +dropped +\[la]https://coreos.com/etcd/docs/latest/learning/api.html\[ra]. This means there are no directories +but only flat keys with prefixes in etcd3. To accommodate lookups, etcdv3 plugin now does a lookup +on prefix \fB\fC/skydns/test/skydns/mx/\fR to search for entries like \fB\fC/skydns/test/skydns/mx/a\fR etc, and +if there is nothing found on \fB\fC/skydns/test/skydns/mx/\fR, it looks for \fB\fC/skydns/test/skydns/mx\fR to +find entries like \fB\fC/skydns/test/skydns/mx1\fR. .PP This causes two lookups from CoreDNS to etcdv3 in certain cases. -.SH "MIGRATION TO "\fB\fCetcdv3\fR" API" -.PP -With CoreDNS release \fB\fC1.2.0\fR, you'll need to migrate existing CoreDNS related data (if any) on your etcd server to etcdv3 API. 
This is because with \fB\fCetcdv3\fR support, CoreDNS can't see the data stored to an etcd server using \fB\fCetcdv2\fR API. - -.PP -Refer this blog by CoreOS team -\[la]https://coreos.com/blog/migrating-applications-etcd-v3.html\[ra] to migrate to etcdv3 API. - .SH "EXAMPLES" .PP This is the default SkyDNS setup, with everything specified in full: @@ -159,12 +164,19 @@ etcd skydns.local { .RE .PP -Before getting started with these examples, please setup \fB\fCetcdctl\fR (with \fB\fCetcdv3\fR API) as explained here -\[la]https://coreos.com/etcd/docs/latest/dev-guide/interacting_v3.html\[ra]. This will help you to put sample keys in your etcd server. +Before getting started with these examples, please setup \fB\fCetcdctl\fR (with \fB\fCetcdv3\fR API) as explained +here +\[la]https://coreos.com/etcd/docs/latest/dev-guide/interacting_v3.html\[ra]. This will help you to put +sample keys in your etcd server. .PP -If you prefer, you can use \fB\fCcurl\fR to populate the \fB\fCetcd\fR server, but with \fB\fCcurl\fR the endpoint URL depends on the version of \fB\fCetcd\fR. For instance, \fB\fCetcd v3.2\fR or before uses only [CLIENT-URL]/v3alpha/* while \fB\fCetcd v3.5\fR or later uses [CLIENT-URL]/v3/* . Also, Key and Value must be base64 encoded in the JSON payload. With \fB\fCetcdctl\fR these details are automatically taken care off. You can check this document -\[la]https://github.com/coreos/etcd/blob/master/Documentation/dev-guide/api_grpc_gateway.md#notes\[ra] for details. +If you prefer, you can use \fB\fCcurl\fR to populate the \fB\fCetcd\fR server, but with \fB\fCcurl\fR the +endpoint URL depends on the version of \fB\fCetcd\fR. For instance, \fB\fCetcd v3.2\fR or before uses only +[CLIENT-URL]/v3alpha/* while \fB\fCetcd v3.5\fR or later uses [CLIENT-URL]/v3/* . Also, Key and Value must +be base64 encoded in the JSON payload. With \fB\fCetcdctl\fR these details are automatically taken care +of. 
You can check this document +\[la]https://github.com/coreos/etcd/blob/master/Documentation/dev-guide/api_grpc_gateway.md#notes\[ra] +for details. .SS "REVERSE ZONES" .PP @@ -210,7 +222,9 @@ reverse.skydns.local. .SS "ZONE NAME AS A RECORD" .PP -The zone name itself can be used as A record. This behavior can be achieved by writing special entries to the ETCD path of your zone. If your zone is named \fB\fCskydns.local\fR for example, you can create an \fB\fCA\fR record for this zone as follows: +The zone name itself can be used as an \fB\fCA\fR record. This behavior can be achieved by writing special +entries to the ETCD path of your zone. If your zone is named \fB\fCskydns.local\fR for example, you can +create an \fB\fCA\fR record for this zone as follows: .PP .RS diff --git a/man/coredns-federation.7 b/man/coredns-federation.7 deleted file mode 100644 index a7c40872044..00000000000 --- a/man/coredns-federation.7 +++ /dev/null @@ -1,54 +0,0 @@ -.\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-FEDERATION" 7 "July 2019" "CoreDNS" "CoreDNS Plugins" - -.SH "NAME" -.PP -\fIfederation\fP - enables federated queries to be resolved via the kubernetes plugin. - -.SH "DESCRIPTION" -.PP -Enabling this plugin allows -Federated -\[la]https://kubernetes.io/docs/tasks/federation/federation-service-discovery/\[ra] queries to be -resolved via the kubernetes plugin. - -.PP -Enabling \fIfederation\fP without also having \fIkubernetes\fP is a noop. - -.SH "SYNTAX" -.PP -.RS - -.nf -federation [ZONES...] { - NAME DOMAIN -} - -.fi -.RE - -.IP \(bu 4 -Each \fBNAME\fP and \fBDOMAIN\fP defines federation membership. One entry for each. A duplicate -\fBNAME\fP will silently overwrite any previous value. - - -.SH "EXAMPLES" -.PP -Here we handle all service requests in the \fB\fCprod\fR and \fB\fCstage\fR federations. - -.PP -.RS - -.nf -\&. 
{ - kubernetes cluster.local - federation cluster.local { - prod prod.feddomain.com - staging staging.feddomain.com - } - forward . 192.168.1.12 -} - -.fi -.RE - diff --git a/man/coredns-file.7 b/man/coredns-file.7 index 90327a38ead..193cb6d8bf1 100644 --- a/man/coredns-file.7 +++ b/man/coredns-file.7 @@ -1,4 +1,4 @@ -.\" Generated by Mmark Markdown Processer - mmark.nl +.\" Generated by Mmark Markdown Processer - mmark.miek.nl .TH "COREDNS-FILE" 7 "August 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" diff --git a/man/coredns-forward.7 b/man/coredns-forward.7 index 2180506c9b8..52bb9c4f869 100644 --- a/man/coredns-forward.7 +++ b/man/coredns-forward.7 @@ -1,5 +1,5 @@ -.\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-FORWARD" 7 "July 2019" "CoreDNS" "CoreDNS Plugins" +.\" Generated by Mmark Markdown Processer - mmark.miek.nl +.TH "COREDNS-FORWARD" 7 "August 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-grpc.7 b/man/coredns-grpc.7 index 17516d06f13..3068c0043d2 100644 --- a/man/coredns-grpc.7 +++ b/man/coredns-grpc.7 @@ -1,5 +1,5 @@ -.\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-GRPC" 7 "July 2019" "CoreDNS" "CoreDNS Plugins" +.\" Generated by Mmark Markdown Processer - mmark.miek.nl +.TH "COREDNS-GRPC" 7 "August 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-health.7 b/man/coredns-health.7 index ccede9d5f2b..78d95720822 100644 --- a/man/coredns-health.7 +++ b/man/coredns-health.7 @@ -1,5 +1,5 @@ -.\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-HEALTH" 7 "July 2019" "CoreDNS" "CoreDNS Plugins" +.\" Generated by Mmark Markdown Processer - mmark.miek.nl +.TH "COREDNS-HEALTH" 7 "August 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP @@ -65,7 +65,7 @@ net { .RE .PP -Doing this is supported but both endponts ":8080" and ":8081" will export the exact same health. +Doing this is supported but both endpoints ":8080" and ":8081" will export the exact same health. 
.SH "METRICS" .PP diff --git a/man/coredns-hosts.7 b/man/coredns-hosts.7 index f3ba8d1b80c..0bb28943849 100644 --- a/man/coredns-hosts.7 +++ b/man/coredns-hosts.7 @@ -1,4 +1,4 @@ -.\" Generated by Mmark Markdown Processer - mmark.nl +.\" Generated by Mmark Markdown Processer - mmark.miek.nl .TH "COREDNS-HOSTS" 7 "August 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" diff --git a/man/coredns-import.7 b/man/coredns-import.7 index cc4ab58d466..757e2a348ae 100644 --- a/man/coredns-import.7 +++ b/man/coredns-import.7 @@ -1,5 +1,5 @@ -.\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-IMPORT" 7 "June 2019" "CoreDNS" "CoreDNS Plugins" +.\" Generated by Mmark Markdown Processer - mmark.miek.nl +.TH "COREDNS-IMPORT" 7 "August 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-k8s_external.7 b/man/coredns-k8s_external.7 index 8c7a74e9157..4293fff1cba 100644 --- a/man/coredns-k8s_external.7 +++ b/man/coredns-k8s_external.7 @@ -1,5 +1,5 @@ -.\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-K8S_EXTERNAL" 7 "July 2019" "CoreDNS" "CoreDNS Plugins" +.\" Generated by Mmark Markdown Processer - mmark.miek.nl +.TH "COREDNS-K8S_EXTERNAL" 7 "August 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP @@ -12,11 +12,11 @@ service. This plugin is only useful if the \fIkubernetes\fP plugin is also loade .PP The plugin uses an external zone to resolve in-cluster IP addresses. It only handles queries for A, -AAAA and SRV records, all others result in NODATA responses. To make it a proper DNS zone it handles +AAAA and SRV records; all others result in NODATA responses. To make it a proper DNS zone, it handles SOA and NS queries for the apex of the zone. .PP -By default the apex of the zone will look like (assuming the zone used is \fB\fCexample.org\fR): +By default the apex of the zone will look like the following (assuming the zone used is \fB\fCexample.org\fR): .PP .RS @@ -38,12 +38,12 @@ ns1.dns.example.org. 5 IN AAAA .... 
.RE .PP -Note we use the \fB\fCdns\fR subdomain to place the records the DNS needs (see the \fB\fCapex\fR directive). Also +Note that we use the \fB\fCdns\fR subdomain for the records DNS needs (see the \fB\fCapex\fR directive). Also note the SOA's serial number is static. The IP addresses of the nameserver records are those of the CoreDNS service. .PP -The \fIk8s_external\fP plugin handles the subdomain \fB\fCdns\fR and the apex of the zone by itself, all other +The \fIk8s_external\fP plugin handles the subdomain \fB\fCdns\fR and the apex of the zone itself; all other queries are resolved to addresses in the cluster. .SH "SYNTAX" @@ -61,7 +61,7 @@ k8s\_external [ZONE...] .PP -If you want to change the apex domain or use a different TTL for the return records you can use +If you want to change the apex domain or use a different TTL for the returned records you can use this extended syntax. .PP @@ -77,13 +77,13 @@ k8s\_external [ZONE...] { .RE .IP \(bu 4 -\fBAPEX\fP is the name (DNS label) to use the apex records, defaults to \fB\fCdns\fR. +\fBAPEX\fP is the name (DNS label) to use for the apex records; it defaults to \fB\fCdns\fR. .IP \(bu 4 \fB\fCttl\fR allows you to set a custom \fBTTL\fP for responses. The default is 5 (seconds). .PP -Enable names under \fB\fCexample.org\fR to be resolved to in cluster DNS addresses. +Enable names under \fB\fCexample.org\fR to be resolved to in-cluster DNS addresses. .PP .RS @@ -98,7 +98,7 @@ Enable names under \fB\fCexample.org\fR to be resolved to in cluster DNS address .RE .PP -With the Corefile above, the following Service will get an \fB\fCA\fR record for \fB\fCtest.default.example.org\fR with IP address \fB\fC192.168.200.123\fR. +With the Corefile above, the following Service will get an \fB\fCA\fR record for \fB\fCtest.default.example.org\fR with the IP address \fB\fC192.168.200.123\fR. 
.PP .RS diff --git a/man/coredns-loadbalance.7 b/man/coredns-loadbalance.7 index f10d44d491f..0b5109a9a05 100644 --- a/man/coredns-loadbalance.7 +++ b/man/coredns-loadbalance.7 @@ -1,5 +1,5 @@ -.\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-LOADBALANCE" 7 "June 2019" "CoreDNS" "CoreDNS Plugins" +.\" Generated by Mmark Markdown Processer - mmark.miek.nl +.TH "COREDNS-LOADBALANCE" 7 "August 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-log.7 b/man/coredns-log.7 index 4c5c8be6865..c8e48de19f3 100644 --- a/man/coredns-log.7 +++ b/man/coredns-log.7 @@ -1,5 +1,5 @@ -.\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-LOG" 7 "June 2019" "CoreDNS" "CoreDNS Plugins" +.\" Generated by Mmark Markdown Processer - mmark.miek.nl +.TH "COREDNS-LOG" 7 "August 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-loop.7 b/man/coredns-loop.7 index 7d11b0fb72f..27ba8901ecb 100644 --- a/man/coredns-loop.7 +++ b/man/coredns-loop.7 @@ -1,5 +1,5 @@ -.\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-LOOP" 7 "July 2019" "CoreDNS" "CoreDNS Plugins" +.\" Generated by Mmark Markdown Processer - mmark.miek.nl +.TH "COREDNS-LOOP" 7 "August 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-metadata.7 b/man/coredns-metadata.7 index 8341e146a38..e3598282e04 100644 --- a/man/coredns-metadata.7 +++ b/man/coredns-metadata.7 @@ -1,5 +1,5 @@ -.\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-METADATA" 7 "June 2019" "CoreDNS" "CoreDNS Plugins" +.\" Generated by Mmark Markdown Processer - mmark.miek.nl +.TH "COREDNS-METADATA" 7 "August 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP @@ -10,7 +10,7 @@ By enabling \fImetadata\fP any plugin that implements metadata.Provider interface \[la]https://godoc.org/github.com/coredns/coredns/plugin/metadata#Provider\[ra] will be called for -each DNS query, at beginning of the process for that query, in order to add 
it's own meta data to +each DNS query, at beginning of the process for that query, in order to add its own meta data to context. .PP diff --git a/man/coredns-metrics.7 b/man/coredns-metrics.7 index 31e029c90ea..8e98634233c 100644 --- a/man/coredns-metrics.7 +++ b/man/coredns-metrics.7 @@ -1,5 +1,5 @@ -.\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-METRICS" 7 "July 2019" "CoreDNS" "CoreDNS Plugins" +.\" Generated by Mmark Markdown Processer - mmark.miek.nl +.TH "COREDNS-METRICS" 7 "August 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-nsid.7 b/man/coredns-nsid.7 index d22b18af2e9..df678afb637 100644 --- a/man/coredns-nsid.7 +++ b/man/coredns-nsid.7 @@ -1,5 +1,5 @@ -.\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-NSID" 7 "June 2019" "CoreDNS" "CoreDNS Plugins" +.\" Generated by Mmark Markdown Processer - mmark.miek.nl +.TH "COREDNS-NSID" 7 "August 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-pprof.7 b/man/coredns-pprof.7 index b96ba86dba6..21556dead49 100644 --- a/man/coredns-pprof.7 +++ b/man/coredns-pprof.7 @@ -1,5 +1,5 @@ -.\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-PPROF" 7 "July 2019" "CoreDNS" "CoreDNS Plugins" +.\" Generated by Mmark Markdown Processer - mmark.miek.nl +.TH "COREDNS-PPROF" 7 "August 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-ready.7 b/man/coredns-ready.7 index 95c499bdedc..cc0c370d576 100644 --- a/man/coredns-ready.7 +++ b/man/coredns-ready.7 @@ -1,5 +1,5 @@ -.\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-READY" 7 "July 2019" "CoreDNS" "CoreDNS Plugins" +.\" Generated by Mmark Markdown Processer - mmark.miek.nl +.TH "COREDNS-READY" 7 "August 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP @@ -15,7 +15,7 @@ will not be queried again. 
.PP Each Server Block that enables the \fIready\fP plugin will have the plugins \fIin that server block\fP report readiness into the /ready endpoint that runs on the same port. This also means that the -\fIsame\fP plugin with different configurations (in potentialy \fIdifferent\fP Server Blocks) will have +\fIsame\fP plugin with different configurations (in potentially \fIdifferent\fP Server Blocks) will have their readiness reported as the union of their respective readinesses. .SH "SYNTAX" diff --git a/man/coredns-reload.7 b/man/coredns-reload.7 index 4a52f6fbfc6..f78d55bf6e0 100644 --- a/man/coredns-reload.7 +++ b/man/coredns-reload.7 @@ -1,4 +1,4 @@ -.\" Generated by Mmark Markdown Processer - mmark.nl +.\" Generated by Mmark Markdown Processer - mmark.miek.nl .TH "COREDNS-RELOAD" 7 "August 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" @@ -125,7 +125,7 @@ fail loading the new Corefile, abort and keep using the old process .PP After the aborted attempt to reload we are left with the old processes running, but the listener is -closed in step 1; so the health endpoint is broken. The same can hopen in the prometheus metrics plugin. +closed in step 1; so the health endpoint is broken. The same can happen in the prometheus metrics plugin. .PP In general be careful with assigning new port and expecting reload to work fully. 
diff --git a/man/coredns-rewrite.7 b/man/coredns-rewrite.7 index a85a811717e..4cd0022b083 100644 --- a/man/coredns-rewrite.7 +++ b/man/coredns-rewrite.7 @@ -1,5 +1,5 @@ -.\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-REWRITE" 7 "July 2019" "CoreDNS" "CoreDNS Plugins" +.\" Generated by Mmark Markdown Processer - mmark.miek.nl +.TH "COREDNS-REWRITE" 7 "August 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-root.7 b/man/coredns-root.7 index 9c4ee7707ab..e5c882928d0 100644 --- a/man/coredns-root.7 +++ b/man/coredns-root.7 @@ -1,5 +1,5 @@ -.\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-ROOT" 7 "June 2019" "CoreDNS" "CoreDNS Plugins" +.\" Generated by Mmark Markdown Processer - mmark.miek.nl +.TH "COREDNS-ROOT" 7 "August 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-secondary.7 b/man/coredns-secondary.7 index b3f6ac3627e..45d5e257cb6 100644 --- a/man/coredns-secondary.7 +++ b/man/coredns-secondary.7 @@ -1,5 +1,5 @@ -.\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-SECONDARY" 7 "July 2019" "CoreDNS" "CoreDNS Plugins" +.\" Generated by Mmark Markdown Processer - mmark.miek.nl +.TH "COREDNS-SECONDARY" 7 "August 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-sign.7 b/man/coredns-sign.7 new file mode 100644 index 00000000000..773e930ab5b --- /dev/null +++ b/man/coredns-sign.7 @@ -0,0 +1,220 @@ +.\" Generated by Mmark Markdown Processer - mmark.miek.nl +.TH "COREDNS-SIGN" 7 "August 2019" "CoreDNS" "CoreDNS Plugins" + +.SH "NAME" +.PP +\fIsign\fP - add DNSSEC records to zone files. + +.SH "DESCRIPTION" +.PP +The \fIsign\fP plugin is used to sign (see RFC 6781) zones. In this process DNSSEC resource records are +added. The signatures that sign the resource record sets have an expiration date; this means the +signing process must be repeated before this expiration date is reached. 
Otherwise the zone's data +will go BAD (RFC 4035, Section 5.5). The \fIsign\fP plugin takes care of this. \fISign\fP works, but has +a couple of limitations, see the "Bugs" section. + +.PP +Only NSEC is supported, \fIsign\fP does not support NSEC3. + +.PP +\fISign\fP works in conjunction with the \fIfile\fP and \fIauto\fP plugins; this plugin \fBsigns\fP the zones +files, \fIauto\fP and \fIfile\fP \fBserve\fP the zones \fIdata\fP. + +.PP +For this plugin to work at least one Common Signing Key (see coredns-keygen(1)) is needed. This key +(or keys) will be used to sign the entire zone. \fISign\fP does not support the ZSK/KSK split, nor will +it do key or algorithm rollovers - it just signs. + +.PP +\fISign\fP will: + +.IP \(bu 4 +(Re)-sign the zone with the CSK(s) when: + +.RS +.IP \(en 4 +the last time it was signed is more than 6 days ago. Each zone will have some jitter +applied to the inception date. +.IP \(en 4 +the signature only has 14 days left before expiring. + +.RE + + +Both these dates are only checked on the SOA's signature(s). +.IP \(bu 4 +Create signatures that have an inception of -3 hours (minus a jitter between 0 and 18 hours) +and an expiration of +32 days for every given DNSKEY. +.IP \(bu 4 +Add or replace \fIall\fP apex CDS/CDNSKEY records with the ones derived from the given keys. For +each key two CDS are created, one with SHA1 and another with SHA256. +.IP \(bu 4 +Update the SOA's serial number to the \fIUnix epoch\fP of when the signing happens. This will +overwrite \fIany\fP previous serial number. + + +.PP +Thus there are two ways that dictate when a zone is signed. Normally every 6 days (plus jitter) it +will be resigned. If for some reason we fail this check, the 14 days before expiring kicks in. + +.PP +Keys are named (following BIND9): \fB\fCK++.key\fR and \fB\fCK++.private\fR. +The keys \fBmust not\fP be included in your zone; they will be added by \fIsign\fP. 
These keys can be +generated with \fB\fCcoredns-keygen\fR or BIND9's \fB\fCdnssec-keygen\fR. You don't have to adhere to this naming +scheme, but then you need to name your keys explicitly, see the \fB\fCkeys file\fR directive. + +.PP +A generated zone is written out in a file named \fB\fCdb..signed\fR in the directory named by the +\fB\fCdirectory\fR directive (which defaults to \fB\fC/var/lib/coredns\fR). + +.SH "SYNTAX" +.PP +.RS + +.nf +sign DBFILE [ZONES...] { + key file|directory KEY...|DIR... + directory DIR +} + +.fi +.RE + +.IP \(bu 4 +\fBDBFILE\fP the zone database file to read and parse. If the path is relative, the path from the +\fIroot\fP directive will be prepended to it. +.IP \(bu 4 +\fBZONES\fP zones it should be sign for. If empty, the zones from the configuration block are +used. +.IP \(bu 4 +\fB\fCkey\fR specifies the key(s) (there can be multiple) to sign the zone. If \fB\fCfile\fR is +used the \fBKEY\fP's filenames are used as is. If \fB\fCdirectory\fR is used, \fIsign\fP will look in \fBDIR\fP +for \fB\fCK++\fR files. Any metadata in these files (Activate, Publish, etc.) is +\fIignored\fP. These keys must also be Key Signing Keys (KSK). +.IP \(bu 4 +\fB\fCdirectory\fR specifies the \fBDIR\fP where CoreDNS should save zones that have been signed. +If not given this defaults to \fB\fC/var/lib/coredns\fR. The zones are saved under the name +\fB\fCdb..signed\fR. If the path is relative the path from the \fIroot\fP directive will be prepended +to it. + + +.PP +Keys can be generated with \fB\fCcoredns-keygen\fR, to create one for use in the \fIsign\fP plugin, use: +\fB\fCcoredns-keygen example.org\fR or \fB\fCdnssec-keygen -a ECDSAP256SHA256 -f KSK example.org\fR. + +.SH "EXAMPLES" +.PP +Sign the \fB\fCexample.org\fR zone contained in the file \fB\fCdb.example.org\fR and write the result to +\fB\fC./db.example.org.signed\fR to let the \fIfile\fP plugin pick it up and serve it. 
The keys used +are read from \fB\fC/etc/coredns/keys/Kexample.org.key\fR and \fB\fC/etc/coredns/keys/Kexample.org.private\fR. + +.PP +.RS + +.nf +example.org { + file db.example.org.signed + + sign db.example.org { + key file /etc/coredns/keys/Kexample.org + directory . + } +} + +.fi +.RE + +.PP +Running this leads to the following log output (note the timers in this example have been set to +shorter intervals). + +.PP +.RS + +.nf +[WARNING] plugin/file: Failed to open "open /tmp/db.example.org.signed: no such file or directory": trying again in 1m0s +[INFO] plugin/sign: Signing "example.org." because open /tmp/db.example.org.signed: no such file or directory +[INFO] plugin/sign: Successfully signed zone "example.org." in "/tmp/db.example.org.signed" with key tags "59725" and 1564766865 SOA serial, elapsed 9.357933ms, next: 2019\-08\-02T22:27:45.270Z +[INFO] plugin/file: Successfully reloaded zone "example.org." in "/tmp/db.example.org.signed" with serial 1564766865 + +.fi +.RE + +.PP +Or use a single zone file for \fImultiple\fP zones, note that the \fBZONES\fP are repeated for both plugins. +Also note this outputs \fImultiple\fP signed output files. Here we use the default output directory +\fB\fC/var/lib/coredns\fR. + +.PP +.RS + +.nf +\&. 
{ + file /var/lib/coredns/db.example.org.signed example.org + file /var/lib/coredns/db.example.net.signed example.net + sign db.example.org example.org example.net { + key directory /etc/coredns/keys + } +} + +.fi +.RE + +.PP +This is the same configuration, but the zones are put in the server block; note that you still +need to specify what file is served for what zone in the \fIfile\fP plugin: + +.PP +.RS + +.nf +example.org example.net { + file var/lib/coredns/db.example.org.signed example.org + file var/lib/coredns/db.example.net.signed example.net + sign db.example.org { + key directory /etc/coredns/keys + } +} + +.fi +.RE + +.PP +Be careful to fully list the origins you want to sign; if you don't: + +.PP +.RS + +.nf +example.org example.net { + sign plugin/sign/testdata/db.example.org miek.org { + key file /etc/coredns/keys/Kexample.org + } +} + +.fi +.RE + +.PP +This will lead to \fB\fCdb.example.org\fR being signed \fItwice\fP, as this entire section is parsed twice because +you have specified the origins \fB\fCexample.org\fR and \fB\fCexample.net\fR in the server block. + +.PP +Forcibly resigning a zone can be accomplished by removing the signed zone file (CoreDNS will keep on +serving it from memory), and sending SIGUSR1 to the process to make it reload and resign the zone +file. + +.SH "ALSO SEE" +.PP +The DNSSEC RFCs: RFC 4033, RFC 4034 and RFC 4035. And the BCP on DNSSEC, RFC 6781. Furthermore, the +manual pages coredns-keygen(1) and dnssec-keygen(8). And the \fIfile\fP plugin's documentation. + +.PP +Coredns-keygen can be found at https://github.com/coredns/coredns-utils +\[la]https://github.com/coredns/coredns-utils\[ra] in the coredns-keygen directory. + +.SH "BUGS" +.PP +\fB\fCkeys directory\fR is not implemented. Glue records are currently signed, and no DS records are added +for child zones. 
+ diff --git a/man/coredns-template.7 b/man/coredns-template.7 index c79cee38fb3..54aa9fda63d 100644 --- a/man/coredns-template.7 +++ b/man/coredns-template.7 @@ -1,5 +1,5 @@ -.\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-TEMPLATE" 7 "July 2019" "CoreDNS" "CoreDNS Plugins" +.\" Generated by Mmark Markdown Processer - mmark.miek.nl +.TH "COREDNS-TEMPLATE" 7 "August 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-tls.7 b/man/coredns-tls.7 index e06a3d6a16d..db0022d2058 100644 --- a/man/coredns-tls.7 +++ b/man/coredns-tls.7 @@ -1,5 +1,5 @@ -.\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-TLS" 7 "July 2019" "CoreDNS" "CoreDNS Plugins" +.\" Generated by Mmark Markdown Processer - mmark.miek.nl +.TH "COREDNS-TLS" 7 "August 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-trace.7 b/man/coredns-trace.7 index 265b43069e8..eb5391ebf3e 100644 --- a/man/coredns-trace.7 +++ b/man/coredns-trace.7 @@ -1,5 +1,5 @@ -.\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-TRACE" 7 "June 2019" "CoreDNS" "CoreDNS Plugins" +.\" Generated by Mmark Markdown Processer - mmark.miek.nl +.TH "COREDNS-TRACE" 7 "August 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-whoami.7 b/man/coredns-whoami.7 index 6927005736d..906527920fc 100644 --- a/man/coredns-whoami.7 +++ b/man/coredns-whoami.7 @@ -1,5 +1,5 @@ -.\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-WHOAMI" 7 "June 2019" "CoreDNS" "CoreDNS Plugins" +.\" Generated by Mmark Markdown Processer - mmark.miek.nl +.TH "COREDNS-WHOAMI" 7 "August 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP From 739cba48b6e68113ac4491c93182efb1655e6fc4 Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Sat, 31 Aug 2019 09:54:43 +0100 Subject: [PATCH 158/324] Notes update for the release. (#3231) * Notes update for the release. acl might not make tomorrow (plan to release then), so remove it for now. 
Add other PRs that got merged and flesh it out some more.

Signed-off-by: Miek Gieben 

* Remove xfr highlight for k8s

Signed-off-by: Miek Gieben 

* Small tweaks while preparing the release

Signed-off-by: Miek Gieben 
---
 notes/coredns-1.6.3.md | 18 +++++++++++++-----
 1 file changed, 13 insertions(+), 5 deletions(-)

diff --git a/notes/coredns-1.6.3.md b/notes/coredns-1.6.3.md
index 8bea15be4ff..10a918277b4 100644
--- a/notes/coredns-1.6.3.md
+++ b/notes/coredns-1.6.3.md
@@ -3,7 +3,7 @@ title = "CoreDNS-1.6.3 Release"
 description = "CoreDNS-1.6.3 Release Notes."
 tags = ["Release", "1.6.3", "Notes"]
 release = "1.6.3"
-date = 2019-08-31T14:35:47+01:00
+date = 2019-08-31T07:30:47+00:00
 author = "coredns"
 +++
 
@@ -12,16 +12,21 @@ The CoreDNS team has released
 
 In this release we have moved the *federation* plugin to
 [github.com/coredns/federation](https://github.com/coredns/federation), but it is still fully
-functional in this version. In version 1.7.0 we expect to fully deprecate it.
+functional in this version. In version 1.7.0 we expect to deprecate it.
 
-Further more a slew a spelling corrections and other minor improvements and polish. **And** three(!)
-new plugins.
+Furthermore, a slew of spelling corrections, and other minor improvements and polish. **And** two new
+plugins: *clouddns* and *sign*.
 
 # Plugins
 
-* [*acl*](/plugins/acl) blocks queries depending on their source IP address.
 * [*clouddns*](/plugins/clouddns) to enable serving zone data from GCP Cloud DNS.
 * [*sign*](/plugins/sign) that (DNSSEC) signs your zonefiles (in its most basic form).
+* [*file*](/plugins/file) various updates, plug a memory leak when doing zone transfers, among other
+  things.
+
+We've removed the time stamping from `pkg/log` as timestamps are *also* added by the logging
+aggregators, like `journald` or inside Kubernetes. And a small ASCII art logo is now printed when
+CoreDNS starts up.
## Brought to You By @@ -48,11 +53,14 @@ zhangguoyan, ## Noteworthy Changes * fuzzing: Add Continuous Fuzzing Integration to Fuzzit (https://github.com/coredns/coredns/pull/3093) +* pkg/log: remove timestamp (https://github.com/coredns/coredns/pull/3218) * plugin/clouddns: Add Google Cloud DNS plugin (https://github.com/coredns/coredns/pull/3011) * plugin/federation: Move federation plugin to github.com/coredns/federation (https://github.com/coredns/coredns/pull/3139) * plugin/file: close reader for reload (https://github.com/coredns/coredns/pull/3196) * plugin/file: less notify logging spam (https://github.com/coredns/coredns/pull/3212) * plugin/file: respond correctly to IXFR message (https://github.com/coredns/coredns/pull/3177) +* plugin/file: rework outgoing axfr (https://github.com/coredns/coredns/pull/3227) * plugin/{health,ready}: return standardized text for ready and health endpoint (https://github.com/coredns/coredns/pull/3195) * plugin/k8s_external handle NS records (https://github.com/coredns/coredns/pull/3160) * plugin/kubernetes: handle NS records (https://github.com/coredns/coredns/pull/3160) +* startup: add logo (https://github.com/coredns/coredns/pull/3230) From 37b9550d62685d450553437776978518ccca631b Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Sat, 31 Aug 2019 10:08:22 +0100 Subject: [PATCH 159/324] Up version to 1.6.3 (#3233) Signed-off-by: Miek Gieben --- coremain/version.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/coremain/version.go b/coremain/version.go index 50ae6402155..ae44275ba2d 100644 --- a/coremain/version.go +++ b/coremain/version.go @@ -2,7 +2,7 @@ package coremain // Various CoreDNS constants. 
const ( - CoreVersion = "1.6.2" + CoreVersion = "1.6.3" coreName = "CoreDNS" serverType = "dns" ) From 5d9a3dbd333fc0325349fdfea69371b390ddd66f Mon Sep 17 00:00:00 2001 From: orangelynx Date: Sun, 1 Sep 2019 09:26:55 +0200 Subject: [PATCH 160/324] Fix for 3235. Adds go.sum and go.mod to .dockerignore to avoid permission issues during docker build process. (#3236) --- .dockerignore | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.dockerignore b/.dockerignore index 98e5cfe14e3..7dfc5a97e40 100644 --- a/.dockerignore +++ b/.dockerignore @@ -10,3 +10,5 @@ test/* vendor/* build/* release/* +go.sum +go.mod From 7894154bfd2f1960c6842318d8ee99c194a04179 Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 2 Sep 2019 17:02:49 +0100 Subject: [PATCH 161/324] build(deps): bump github.com/aws/aws-sdk-go from 1.23.8 to 1.23.13 (#3237) Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.23.8 to 1.23.13. 
- [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Changelog](https://github.com/aws/aws-sdk-go/blob/master/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.23.8...v1.23.13) Signed-off-by: dependabot-preview[bot] --- go.mod | 2 +- go.sum | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index c2364473751..28224630fe3 100644 --- a/go.mod +++ b/go.mod @@ -11,7 +11,7 @@ require ( github.com/Azure/go-autorest/autorest/to v0.2.0 // indirect github.com/Shopify/sarama v1.21.0 // indirect github.com/apache/thrift v0.12.0 // indirect - github.com/aws/aws-sdk-go v1.23.8 + github.com/aws/aws-sdk-go v1.23.13 github.com/caddyserver/caddy v1.0.3 github.com/coredns/federation v0.0.0-20190818181423-e032b096babe github.com/coreos/bbolt v1.3.2 // indirect diff --git a/go.sum b/go.sum index 82c84388dcb..81c952daaa3 100644 --- a/go.sum +++ b/go.sum @@ -46,6 +46,8 @@ github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/aws/aws-sdk-go v1.23.8 h1:G/azJoBN0pnhB3B+0eeC4yyVFYIIad6bbzg6wwtImqk= github.com/aws/aws-sdk-go v1.23.8/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.23.13 h1:l/NG+mgQFRGG3dsFzEj0jw9JIs/zYdtU6MXhY1WIDmM= +github.com/aws/aws-sdk-go v1.23.13/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= From 79f37a1460cc52ce6c63110f4df33316a36af3a5 Mon Sep 17 00:00:00 2001 From: An Xiao 
Date: Wed, 4 Sep 2019 23:43:45 +0800 Subject: [PATCH 162/324] Add plugin ACL for source ip filtering (#3103) * Add plugin ACL for source ip filtering Signed-off-by: An Xiao * Allow all arguments to be optional and support multiple qtypes in a single policy Signed-off-by: An Xiao * Add newline before third party imports Signed-off-by: An Xiao * Use camel instead of underscore in method name Signed-off-by: An Xiao * Start with an upper case letter in t.Errorf() Signed-off-by: An Xiao * Use the qtype parse logic in miekg/dns Signed-off-by: An Xiao * Use third party trie implementation as the ip filter Signed-off-by: An Xiao * Update based on rdrozhdzh's comment Signed-off-by: An Xiao * Change the type of action to int Signed-off-by: An Xiao * Add IPv6 support Signed-off-by: An Xiao * Update plugin.cfg Signed-off-by: An Xiao * Remove file functionality Signed-off-by: An Xiao * Update Signed-off-by: Xiao An * Update README Signed-off-by: Xiao An * remove comments Signed-off-by: Xiao An * update Signed-off-by: Xiao An * Update dependency Signed-off-by: Xiao An * Update Signed-off-by: Xiao An * Update test Signed-off-by: Xiao An * Add OWNERS Signed-off-by: Xiao An * Refactor shouldBlock and skip useless check Signed-off-by: Xiao An * Introduce ActionNone Signed-off-by: Xiao An * Update label name Signed-off-by: Xiao An * Avoid capitalizing private types Signed-off-by: Xiao An --- core/dnsserver/zdirectives.go | 1 + core/plugin/zplugin.go | 1 + go.mod | 3 + go.sum | 30 +-- plugin.cfg | 1 + plugin/acl/OWNERS | 7 + plugin/acl/README.md | 68 ++++++ plugin/acl/acl.go | 115 ++++++++++ plugin/acl/acl_test.go | 396 ++++++++++++++++++++++++++++++++++ plugin/acl/metrics.go | 24 +++ plugin/acl/setup.go | 166 ++++++++++++++ plugin/acl/setup_test.go | 245 +++++++++++++++++++++ 12 files changed, 1031 insertions(+), 26 deletions(-) create mode 100644 plugin/acl/OWNERS create mode 100644 plugin/acl/README.md create mode 100644 plugin/acl/acl.go create mode 100644 plugin/acl/acl_test.go 
create mode 100644 plugin/acl/metrics.go create mode 100644 plugin/acl/setup.go create mode 100644 plugin/acl/setup_test.go diff --git a/core/dnsserver/zdirectives.go b/core/dnsserver/zdirectives.go index 6dfb99226e8..d1310a7f1b6 100644 --- a/core/dnsserver/zdirectives.go +++ b/core/dnsserver/zdirectives.go @@ -26,6 +26,7 @@ var Directives = []string{ "errors", "log", "dnstap", + "acl", "any", "chaos", "loadbalance", diff --git a/core/plugin/zplugin.go b/core/plugin/zplugin.go index d522ed29490..3a31f412749 100644 --- a/core/plugin/zplugin.go +++ b/core/plugin/zplugin.go @@ -5,6 +5,7 @@ package plugin import ( // Include all plugins. _ "github.com/caddyserver/caddy/onevent" + _ "github.com/coredns/coredns/plugin/acl" _ "github.com/coredns/coredns/plugin/any" _ "github.com/coredns/coredns/plugin/auto" _ "github.com/coredns/coredns/plugin/autopath" diff --git a/go.mod b/go.mod index 28224630fe3..bfa3445762f 100644 --- a/go.mod +++ b/go.mod @@ -17,6 +17,7 @@ require ( github.com/coreos/bbolt v1.3.2 // indirect github.com/coreos/go-systemd v0.0.0-20190212144455-93d5ec2c7f76 // indirect github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f // indirect + github.com/dgryski/go-metro v0.0.0-20180109044635-280f6062b5bc // indirect github.com/dnstap/golang-dnstap v0.0.0-20170829151710-2cf77a2b5e11 github.com/evanphx/json-patch v4.1.0+incompatible // indirect github.com/farsightsec/golang-framestream v0.0.0-20181102145529-8a0cb8ba8710 @@ -26,6 +27,7 @@ require ( github.com/gophercloud/gophercloud v0.0.0-20190307220656-fe1ba5ce12dd // indirect github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 github.com/imdario/mergo v0.3.7 // indirect + github.com/infobloxopen/go-trees v0.0.0-20190313150506-2af4e13f9062 github.com/konsorten/go-windows-terminal-sequences v1.0.2 // indirect 
github.com/matttproud/golang_protobuf_extensions v1.0.1 github.com/miekg/dns v1.1.16 @@ -37,6 +39,7 @@ require ( github.com/prometheus/client_golang v1.1.0 github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 github.com/prometheus/common v0.6.0 + github.com/seiflotfy/cuckoofilter v0.0.0-20190302225222-764cb5258d9b github.com/sirupsen/logrus v1.4.2 // indirect github.com/spf13/cobra v0.0.5 // indirect github.com/tinylib/msgp v1.1.0 // indirect diff --git a/go.sum b/go.sum index 81c952daaa3..155e15813d7 100644 --- a/go.sum +++ b/go.sum @@ -37,7 +37,6 @@ github.com/DataDog/zstd v1.3.5 h1:DtpNbljikUepEPD16hD4LvIcmhnhdLTiW/5pHgbmp14= github.com/DataDog/zstd v1.3.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= github.com/Shopify/sarama v1.21.0 h1:0GKs+e8mn1RRUzfg9oUXv3v7ZieQLmOZF/bfnmmGhM8= github.com/Shopify/sarama v1.21.0/go.mod h1:yuqtN/pe8cXRWG5zPaO7hCfNJp5MwmkoJEoLjkm5tCQ= -github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= @@ -49,7 +48,6 @@ github.com/aws/aws-sdk-go v1.23.8/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN github.com/aws/aws-sdk-go v1.23.13 h1:l/NG+mgQFRGG3dsFzEj0jw9JIs/zYdtU6MXhY1WIDmM= github.com/aws/aws-sdk-go v1.23.13/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0= 
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= @@ -64,7 +62,6 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/coredns/federation v0.0.0-20190818181423-e032b096babe h1:ND08lR/TclI9W4dScCwdRESOacCCdF3FkuB5pBIOv1U= github.com/coredns/federation v0.0.0-20190818181423-e032b096babe/go.mod h1:MoqTEFX8GlnKkyq8eBCF94VzkNAOgjdlCJ+Pz/oCLPk= -github.com/coreos/bbolt v1.3.2 h1:wZwiHHUieZCquLkDL0B8UhzreNWsPHooDAG3q34zk0s= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/etcd v3.3.15+incompatible h1:+9RjdC18gMxNQVvSiXvObLu29mOFmkgdsB4cRTlV+EE= @@ -86,6 +83,7 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/dgrijalva/jwt-go v0.0.0-20160705203006-01aeca54ebda/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-metro v0.0.0-20180109044635-280f6062b5bc/go.mod h1:c9O8+fpSOX1DM8cPNSkX/qsBWdkD4yd2dpciOWQjpBw= github.com/dimchansky/utfbom v1.1.0 h1:FcM3g+nofKgUteL8dm/UpdRXNC9KmADgTpLKsu0TRo4= github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= github.com/dnstap/golang-dnstap v0.0.0-20170829151710-2cf77a2b5e11 
h1:m8nX8hsUghn853BJ5qB0lX+VvS6LTJPksWyILFZRYN4= @@ -109,9 +107,7 @@ github.com/farsightsec/golang-framestream v0.0.0-20181102145529-8a0cb8ba8710/go. github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 h1:BHsljHzVlRcyQhjrss6TZTdY2VfCqZPbv5k3iBFa2ZQ= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= -github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-acme/lego v2.5.0+incompatible h1:5fNN9yRQfv8ymH3DSsxla+4aYeQt2IgfZqHKVnK8f0s= github.com/go-acme/lego v2.5.0+incompatible/go.mod h1:yzMNe9CasVUhkquNvti5nAtPmG94USbYxYrZfTkIn0M= @@ -125,7 +121,6 @@ github.com/gogo/protobuf v0.0.0-20171007142547-342cbe0a0415/go.mod h1:r8qH/GZQm5 github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef h1:veQD95Isof8w9/WXiA+pa3tz3fJXkt5B7QaRBrM62gk= @@ -134,7 +129,6 @@ github.com/golang/mock 
v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -142,12 +136,10 @@ github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db h1:woRePGFeVFfLKN/pO github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20160524151835-7d79101e329e/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf h1:+RRA9JqSOZFfKrOeqr2z77+8R2RKyh8PG66dcu1V0ck= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= 
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -178,7 +170,6 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92Bcuy github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.8.5 h1:2+KSC78XiO6Qy0hIjfc1OD9H+hsaJdJlb8Kqsd41CTE= github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.9.5 h1:UImYN5qQ8tuGpGE16ZmjvcTtTw24zw1QAp/SlnNrZhI= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 h1:MJG/KsmcqMwFAkh8mTnAwhyKoB+sTAnY4CACC110tbU= github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= @@ -194,10 +185,11 @@ github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJ github.com/imdario/mergo v0.3.7 h1:Y+UAYTZ7gDEuOfhxKWy+dvb5dRQ6rJjFSdX2HZY1/gI= github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/infobloxopen/go-trees v0.0.0-20190313150506-2af4e13f9062 h1:d3VSuNcgTCn21dNMm8g412Fck/XWFmMj4nJhhHT7ZZ0= +github.com/infobloxopen/go-trees v0.0.0-20190313150506-2af4e13f9062/go.mod h1:PcNJqIlcX/dj3DTG/+QQnRvSgTMG6CLpRMjWcv4+J6w= github.com/jimstudt/http-authentication v0.0.0-20140401203705-3eca13d6893a/go.mod h1:wK6yTYYcgjHE1Z1QtXACPDjcFJyBskHEdagmnq3vsP8= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM= github.com/jmespath/go-jmespath 
v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= -github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/json-iterator/go v0.0.0-20180701071628-ab8a2e0c74be/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs= @@ -211,14 +203,10 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o github.com/klauspost/cpuid v1.2.0 h1:NMpwD2G9JSFOE1/TJjGSo5zG7Yb2bTe7eq1jH+irmeE= github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/lucas-clemente/aes12 
v0.0.0-20171027163421-cd47fb39b79f/go.mod h1:JpH9J1c9oX6otFSgdUHwUBUizmKlrMjxWnIAjff4m04= @@ -269,9 +257,7 @@ github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG github.com/pierrec/lz4 v2.0.5+incompatible h1:2xWsjqPFWcplujydGg4WmhC/6fZqK42wMM8aXeqhl0I= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0 h1:vrDKnkGzuGvhNAL56c7DBz29ZL+KxnoR0x7enabFceM= @@ -295,10 +281,9 @@ github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqn github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/russross/blackfriday v0.0.0-20170610170232-067529f716f4/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/seiflotfy/cuckoofilter v0.0.0-20190302225222-764cb5258d9b/go.mod h1:ET5mVvNjwaGXRgZxO9UZr7X+8eAf87AfIYNwRSp9s4Y= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/soheilhy/cmux 
v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= @@ -314,7 +299,6 @@ github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DM github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/tinylib/msgp v1.1.0 h1:9fQd+ICuRIu/ue4vxJZu6/LzxN0HwMds2nq/0cFvxHU= github.com/tinylib/msgp v1.1.0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= @@ -383,7 +367,6 @@ golang.org/x/net v0.0.0-20190328230028-74de082e2cca/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65 h1:+rhAzEzT3f4JtomfC371qB+0Ola2caSKcY69NUBZrRQ= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -452,14 +435,12 @@ golang.org/x/tools 
v0.0.0-20190624190245-7f2218787638/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0 h1:9sdfJOzWlkqPltHAuzT2Cp+yrBeY1KRVYgms8soxMwM= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.9.0 h1:jbyannxz0XFD3zdjgrSUsaJbgpH4eTrkdhRChkHPfO8= google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1 h1:QzqyMA1tlu6CgqCDUtU9V+ZKhLFT2dkJuANu5QaxI3I= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -473,7 +454,6 @@ google.golang.org/genproto v0.0.0-20190701230453-710ae3a149df h1:k3DT34vxk64+4bD google.golang.org/genproto v0.0.0-20190701230453-710ae3a149df/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.1 h1:j6XxA85m/6txkUCHvzlV5f+HBNl/1r5cZ2A/3IEFOO8= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0 h1:AzbTB6ux+okLTzP8Ru1Xs41C303zdcfEht7MQnYJt5A= 
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= @@ -481,7 +461,6 @@ gopkg.in/DataDog/dd-trace-go.v1 v1.16.1 h1:Dngw1zun6yTYFHNdzEWBlrJzFA2QJMjSA2sZ4 gopkg.in/DataDog/dd-trace-go.v1 v1.16.1/go.mod h1:DVp8HmDh8PuTu2Z0fVVlBsyWaC++fzwVCaGWylTe3tg= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= @@ -494,7 +473,6 @@ gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24 gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/square/go-jose.v2 v2.2.2 h1:orlkJ3myw8CN1nVQHBFfloD+L3egixIa4FvUP6RosSA= gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/plugin.cfg b/plugin.cfg index ff856f72468..66552cd802d 100644 --- a/plugin.cfg +++ b/plugin.cfg @@ -35,6 +35,7 @@ prometheus:metrics errors:errors log:log dnstap:dnstap +acl:acl any:any chaos:chaos loadbalance:loadbalance diff --git a/plugin/acl/OWNERS b/plugin/acl/OWNERS new file mode 100644 index 00000000000..5921fce191b --- /dev/null +++ b/plugin/acl/OWNERS @@ -0,0 +1,7 @@ 
+reviewers: + - miekg + - ihac +approvers: + - miekg + - ihac + diff --git a/plugin/acl/README.md b/plugin/acl/README.md new file mode 100644 index 00000000000..49b6895a920 --- /dev/null +++ b/plugin/acl/README.md @@ -0,0 +1,68 @@ +# acl + +*acl* - enforces access control policies on source ip and prevents unauthorized access to DNS servers. + +## Description + +With `acl` enabled, users are able to block suspicous DNS queries by configuring IP filter rule sets, i.e. allowing authorized queries to recurse or blocking unauthorized queries. + +This plugin can be used multiple times per Server Block. + +## Syntax + +``` +acl [ZONES...] { + ACTION [type QTYPE...] [net SOURCE...] +} +``` + +- **ZONES** zones it should be authoritative for. If empty, the zones from the configuration block are used. +- **ACTION** (*allow* or *block*) defines the way to deal with DNS queries matched by this rule. The default action is *allow*, which means a DNS query not matched by any rules will be allowed to recurse. +- **QTYPE** is the query type to match for the requests to be allowed or blocked. Common resource record types are supported. `*` stands for all record types. The default behavior for an omitted `type QTYPE...` is to match all kinds of DNS queries (same as `type *`). +- **SOURCE** is the source IP address to match for the requests to be allowed or blocked. Typical CIDR notation and single IP address are supported. `*` stands for all possible source IP addresses. + +## Examples + +To demonstrate the usage of plugin acl, here we provide some typical examples. + +Block all DNS queries with record type A from 192.168.0.0/16: + +~~~ Corefile +. { + acl { + block type A net 192.168.0.0/16 + } +} +~~~ + +Block all DNS queries from 192.168.0.0/16 except for 192.168.1.0/24: + +~~~ Corefile +. { + acl { + allow net 192.168.1.0/24 + block net 192.168.0.0/16 + } +} +``` + +Allow only DNS queries from 192.168.0.0/24 and 192.168.1.0/24: + +~~~ Corefile +. 
{ + acl { + allow net 192.168.0.0/16 192.168.1.0/24 + block + } +} +~~~ + +Block all DNS queries from 192.168.1.0/24 towards a.example.org: + +~~~ Corefile +example.org { + acl a.example.org { + block net 192.168.1.0/24 + } +} +~~~ diff --git a/plugin/acl/acl.go b/plugin/acl/acl.go new file mode 100644 index 00000000000..b25138a309e --- /dev/null +++ b/plugin/acl/acl.go @@ -0,0 +1,115 @@ +package acl + +import ( + "context" + "net" + + "github.com/coredns/coredns/plugin" + "github.com/coredns/coredns/plugin/metrics" + clog "github.com/coredns/coredns/plugin/pkg/log" + "github.com/coredns/coredns/request" + + "github.com/infobloxopen/go-trees/iptree" + "github.com/miekg/dns" +) + +var log = clog.NewWithPlugin("acl") + +// ACL enforces access control policies on DNS queries. +type ACL struct { + Next plugin.Handler + + Rules []rule +} + +// rule defines a list of Zones and some ACL policies which will be +// enforced on them. +type rule struct { + zones []string + policies []policy +} + +// action defines the action against queries. +type action int + +// policy defines the ACL policy for DNS queries. +// A policy performs the specified action (block/allow) on all DNS queries +// matched by source IP or QTYPE. +type policy struct { + action action + qtypes map[uint16]struct{} + filter *iptree.Tree +} + +const ( + // actionNone does nothing on the queries. + actionNone = iota + // actionAllow allows authorized queries to recurse. + actionAllow + // actionBlock blocks unauthorized queries towards protected DNS zones. + actionBlock +) + +// ServeDNS implements the plugin.Handler interface. +func (a ACL) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (int, error) { + state := request.Request{W: w, Req: r} + +RulesCheckLoop: + for _, rule := range a.Rules { + // check zone. 
+ zone := plugin.Zones(rule.zones).Matches(state.Name()) + if zone == "" { + continue + } + + action := matchWithPolicies(rule.policies, w, r) + switch action { + case actionBlock: + { + m := new(dns.Msg) + m.SetRcode(r, dns.RcodeRefused) + w.WriteMsg(m) + RequestBlockCount.WithLabelValues(metrics.WithServer(ctx), zone).Inc() + return dns.RcodeSuccess, nil + } + case actionAllow: + { + break RulesCheckLoop + } + } + } + + RequestAllowCount.WithLabelValues(metrics.WithServer(ctx)).Inc() + return plugin.NextOrFailure(state.Name(), a.Next, ctx, w, r) +} + +// matchWithPolicies matches the DNS query with a list of ACL polices and returns suitable +// action agains the query. +func matchWithPolicies(policies []policy, w dns.ResponseWriter, r *dns.Msg) action { + state := request.Request{W: w, Req: r} + + ip := net.ParseIP(state.IP()) + qtype := state.QType() + for _, policy := range policies { + // dns.TypeNone matches all query types. + _, matchAll := policy.qtypes[dns.TypeNone] + _, match := policy.qtypes[qtype] + if !matchAll && !match { + continue + } + + _, contained := policy.filter.GetByIP(ip) + if !contained { + continue + } + + // matched. + return policy.action + } + return actionNone +} + +// Name implements the plugin.Handler interface. +func (a ACL) Name() string { + return "acl" +} diff --git a/plugin/acl/acl_test.go b/plugin/acl/acl_test.go new file mode 100644 index 00000000000..9b23edc5384 --- /dev/null +++ b/plugin/acl/acl_test.go @@ -0,0 +1,396 @@ +package acl + +import ( + "context" + "testing" + + "github.com/coredns/coredns/plugin/test" + + "github.com/caddyserver/caddy" + "github.com/miekg/dns" +) + +type testResponseWriter struct { + test.ResponseWriter + Rcode int +} + +func (t *testResponseWriter) setRemoteIP(ip string) { + t.RemoteIP = ip +} + +// WriteMsg implement dns.ResponseWriter interface. 
+func (t *testResponseWriter) WriteMsg(m *dns.Msg) error { + t.Rcode = m.Rcode + return nil +} + +func NewTestControllerWithZones(input string, zones []string) *caddy.Controller { + ctr := caddy.NewTestController("dns", input) + for _, zone := range zones { + ctr.ServerBlockKeys = append(ctr.ServerBlockKeys, zone) + } + return ctr +} + +func TestACLServeDNS(t *testing.T) { + type args struct { + domain string + sourceIP string + qtype uint16 + } + tests := []struct { + name string + config string + zones []string + args args + wantRcode int + wantErr bool + }{ + // IPv4 tests. + { + "Blacklist 1 BLOCKED", + `acl example.org { + block type A net 192.168.0.0/16 + }`, + []string{}, + args{ + "www.example.org.", + "192.168.0.2", + dns.TypeA, + }, + dns.RcodeRefused, + false, + }, + { + "Blacklist 1 ALLOWED", + `acl example.org { + block type A net 192.168.0.0/16 + }`, + []string{}, + args{ + "www.example.org.", + "192.167.0.2", + dns.TypeA, + }, + dns.RcodeSuccess, + false, + }, + { + "Blacklist 2 BLOCKED", + ` + acl example.org { + block type * net 192.168.0.0/16 + }`, + []string{}, + args{ + "www.example.org.", + "192.168.0.2", + dns.TypeAAAA, + }, + dns.RcodeRefused, + false, + }, + { + "Blacklist 3 BLOCKED", + `acl example.org { + block type A + }`, + []string{}, + args{ + "www.example.org.", + "10.1.0.2", + dns.TypeA, + }, + dns.RcodeRefused, + false, + }, + { + "Blacklist 3 ALLOWED", + `acl example.org { + block type A + }`, + []string{}, + args{ + "www.example.org.", + "10.1.0.2", + dns.TypeAAAA, + }, + dns.RcodeSuccess, + false, + }, + { + "Blacklist 4 Single IP BLOCKED", + `acl example.org { + block type A net 192.168.1.2 + }`, + []string{}, + args{ + "www.example.org.", + "192.168.1.2", + dns.TypeA, + }, + dns.RcodeRefused, + false, + }, + { + "Blacklist 4 Single IP ALLOWED", + `acl example.org { + block type A net 192.168.1.2 + }`, + []string{}, + args{ + "www.example.org.", + "192.168.1.3", + dns.TypeA, + }, + dns.RcodeSuccess, + false, + }, + { + 
"Whitelist 1 ALLOWED", + `acl example.org { + allow net 192.168.0.0/16 + block + }`, + []string{}, + args{ + "www.example.org.", + "192.168.0.2", + dns.TypeA, + }, + dns.RcodeSuccess, + false, + }, + { + "Whitelist 1 REFUSED", + `acl example.org { + allow type * net 192.168.0.0/16 + block + }`, + []string{}, + args{ + "www.example.org.", + "10.1.0.2", + dns.TypeA, + }, + dns.RcodeRefused, + false, + }, + { + "Fine-Grained 1 REFUSED", + `acl a.example.org { + block type * net 192.168.1.0/24 + }`, + []string{"example.org"}, + args{ + "a.example.org.", + "192.168.1.2", + dns.TypeA, + }, + dns.RcodeRefused, + false, + }, + { + "Fine-Grained 1 ALLOWED", + `acl a.example.org { + block net 192.168.1.0/24 + }`, + []string{"example.org"}, + args{ + "www.example.org.", + "192.168.1.2", + dns.TypeA, + }, + dns.RcodeSuccess, + false, + }, + { + "Fine-Grained 2 REFUSED", + `acl { + block net 192.168.1.0/24 + }`, + []string{"example.org"}, + args{ + "a.example.org.", + "192.168.1.2", + dns.TypeA, + }, + dns.RcodeRefused, + false, + }, + { + "Fine-Grained 2 ALLOWED", + `acl { + block net 192.168.1.0/24 + }`, + []string{"example.org"}, + args{ + "a.example.com.", + "192.168.1.2", + dns.TypeA, + }, + dns.RcodeSuccess, + false, + }, + { + "Fine-Grained 3 REFUSED", + `acl a.example.org { + block net 192.168.1.0/24 + } + acl b.example.org { + block type * net 192.168.2.0/24 + }`, + []string{"example.org"}, + args{ + "b.example.org.", + "192.168.2.2", + dns.TypeA, + }, + dns.RcodeRefused, + false, + }, + { + "Fine-Grained 3 ALLOWED", + `acl a.example.org { + block net 192.168.1.0/24 + } + acl b.example.org { + block net 192.168.2.0/24 + }`, + []string{"example.org"}, + args{ + "b.example.org.", + "192.168.1.2", + dns.TypeA, + }, + dns.RcodeSuccess, + false, + }, + // IPv6 tests. 
+ { + "Blacklist 1 BLOCKED IPv6", + `acl example.org { + block type A net 2001:db8:abcd:0012::0/64 + }`, + []string{}, + args{ + "www.example.org.", + "2001:db8:abcd:0012::1230", + dns.TypeA, + }, + dns.RcodeRefused, + false, + }, + { + "Blacklist 1 ALLOWED IPv6", + `acl example.org { + block type A net 2001:db8:abcd:0012::0/64 + }`, + []string{}, + args{ + "www.example.org.", + "2001:db8:abcd:0013::0", + dns.TypeA, + }, + dns.RcodeSuccess, + false, + }, + { + "Blacklist 2 BLOCKED IPv6", + `acl example.org { + block type A + }`, + []string{}, + args{ + "www.example.org.", + "2001:0db8:85a3:0000:0000:8a2e:0370:7334", + dns.TypeA, + }, + dns.RcodeRefused, + false, + }, + { + "Blacklist 3 Single IP BLOCKED IPv6", + `acl example.org { + block type A net 2001:0db8:85a3:0000:0000:8a2e:0370:7334 + }`, + []string{}, + args{ + "www.example.org.", + "2001:0db8:85a3:0000:0000:8a2e:0370:7334", + dns.TypeA, + }, + dns.RcodeRefused, + false, + }, + { + "Blacklist 3 Single IP ALLOWED IPv6", + `acl example.org { + block type A net 2001:0db8:85a3:0000:0000:8a2e:0370:7334 + }`, + []string{}, + args{ + "www.example.org.", + "2001:0db8:85a3:0000:0000:8a2e:0370:7335", + dns.TypeA, + }, + dns.RcodeSuccess, + false, + }, + { + "Fine-Grained 1 REFUSED IPv6", + `acl a.example.org { + block type * net 2001:db8:abcd:0012::0/64 + }`, + []string{"example.org"}, + args{ + "a.example.org.", + "2001:db8:abcd:0012:2019::0", + dns.TypeA, + }, + dns.RcodeRefused, + false, + }, + { + "Fine-Grained 1 ALLOWED IPv6", + `acl a.example.org { + block net 2001:db8:abcd:0012::0/64 + }`, + []string{"example.org"}, + args{ + "www.example.org.", + "2001:db8:abcd:0012:2019::0", + dns.TypeA, + }, + dns.RcodeSuccess, + false, + }, + } + + ctx := context.Background() + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctr := NewTestControllerWithZones(tt.config, tt.zones) + a, err := parse(ctr) + a.Next = test.NextHandler(dns.RcodeSuccess, nil) + if err != nil { + t.Errorf("Error: Cannot parse acl 
from config: %v", err) + return + } + + w := &testResponseWriter{} + m := new(dns.Msg) + w.setRemoteIP(tt.args.sourceIP) + m.SetQuestion(tt.args.domain, tt.args.qtype) + _, err = a.ServeDNS(ctx, w, m) + if (err != nil) != tt.wantErr { + t.Errorf("Error: acl.ServeDNS() error = %v, wantErr %v", err, tt.wantErr) + return + } + if w.Rcode != tt.wantRcode { + t.Errorf("Error: acl.ServeDNS() Rcode = %v, want %v", w.Rcode, tt.wantRcode) + } + }) + } +} diff --git a/plugin/acl/metrics.go b/plugin/acl/metrics.go new file mode 100644 index 00000000000..442ea23744e --- /dev/null +++ b/plugin/acl/metrics.go @@ -0,0 +1,24 @@ +package acl + +import ( + "github.com/coredns/coredns/plugin" + + "github.com/prometheus/client_golang/prometheus" +) + +var ( + // RequestBlockCount is the number of DNS requests being blocked. + RequestBlockCount = prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: plugin.Namespace, + Subsystem: "dns", + Name: "request_block_count_total", + Help: "Counter of DNS requests being blocked.", + }, []string{"server", "zone"}) + // RequestAllowCount is the number of DNS requests being Allowed. 
+ RequestAllowCount = prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: plugin.Namespace, + Subsystem: "dns", + Name: "request_allow_count_total", + Help: "Counter of DNS requests being allowed.", + }, []string{"server"}) +) diff --git a/plugin/acl/setup.go b/plugin/acl/setup.go new file mode 100644 index 00000000000..1179175dd1f --- /dev/null +++ b/plugin/acl/setup.go @@ -0,0 +1,166 @@ +package acl + +import ( + "net" + "strings" + + "github.com/coredns/coredns/core/dnsserver" + "github.com/coredns/coredns/plugin" + "github.com/coredns/coredns/plugin/metrics" + + "github.com/caddyserver/caddy" + "github.com/infobloxopen/go-trees/iptree" + "github.com/miekg/dns" +) + +func init() { + caddy.RegisterPlugin("acl", caddy.Plugin{ + ServerType: "dns", + Action: setup, + }) +} + +func newDefaultFilter() *iptree.Tree { + defaultFilter := iptree.NewTree() + _, IPv4All, _ := net.ParseCIDR("0.0.0.0/0") + _, IPv6All, _ := net.ParseCIDR("::/0") + defaultFilter.InplaceInsertNet(IPv4All, struct{}{}) + defaultFilter.InplaceInsertNet(IPv6All, struct{}{}) + return defaultFilter +} + +func setup(c *caddy.Controller) error { + a, err := parse(c) + if err != nil { + return plugin.Error("acl", err) + } + + dnsserver.GetConfig(c).AddPlugin(func(next plugin.Handler) plugin.Handler { + a.Next = next + return a + }) + + // Register all metrics. + c.OnStartup(func() error { + metrics.MustRegister(c, RequestBlockCount, RequestAllowCount) + return nil + }) + return nil +} + +func parse(c *caddy.Controller) (ACL, error) { + a := ACL{} + for c.Next() { + r := rule{} + r.zones = c.RemainingArgs() + if len(r.zones) == 0 { + // if empty, the zones from the configuration block are used. 
+ r.zones = make([]string, len(c.ServerBlockKeys)) + copy(r.zones, c.ServerBlockKeys) + } + for i := range r.zones { + r.zones[i] = plugin.Host(r.zones[i]).Normalize() + } + + for c.NextBlock() { + p := policy{} + + action := strings.ToLower(c.Val()) + if action == "allow" { + p.action = actionAllow + } else if action == "block" { + p.action = actionBlock + } else { + return a, c.Errf("unexpected token %q; expect 'allow' or 'block'", c.Val()) + } + + p.qtypes = make(map[uint16]struct{}) + p.filter = iptree.NewTree() + + hasTypeSection := false + hasNetSection := false + + remainingTokens := c.RemainingArgs() + for len(remainingTokens) > 0 { + if !isPreservedIdentifier(remainingTokens[0]) { + return a, c.Errf("unexpected token %q; expect 'type | net'", remainingTokens[0]) + } + section := strings.ToLower(remainingTokens[0]) + + i := 1 + var tokens []string + for ; i < len(remainingTokens) && !isPreservedIdentifier(remainingTokens[i]); i++ { + tokens = append(tokens, remainingTokens[i]) + } + remainingTokens = remainingTokens[i:] + + if len(tokens) == 0 { + return a, c.Errf("no token specified in %q section", section) + } + + switch section { + case "type": + hasTypeSection = true + for _, token := range tokens { + if token == "*" { + p.qtypes[dns.TypeNone] = struct{}{} + break + } + qtype, ok := dns.StringToType[token] + if !ok { + return a, c.Errf("unexpected token %q; expect legal QTYPE", token) + } + p.qtypes[qtype] = struct{}{} + } + case "net": + hasNetSection = true + for _, token := range tokens { + if token == "*" { + p.filter = newDefaultFilter() + break + } + token = normalize(token) + _, source, err := net.ParseCIDR(token) + if err != nil { + return a, c.Errf("illegal CIDR notation %q", token) + } + p.filter.InplaceInsertNet(source, struct{}{}) + } + default: + return a, c.Errf("unexpected token %q; expect 'type | net'", section) + } + } + + // optional `type` section means all record types. 
+ if !hasTypeSection { + p.qtypes[dns.TypeNone] = struct{}{} + } + + // optional `net` means all ip addresses. + if !hasNetSection { + p.filter = newDefaultFilter() + } + + r.policies = append(r.policies, p) + } + a.Rules = append(a.Rules, r) + } + return a, nil +} + +func isPreservedIdentifier(token string) bool { + identifier := strings.ToLower(token) + return identifier == "type" || identifier == "net" +} + +// normalize appends '/32' for any single IPv4 address and '/128' for IPv6. +func normalize(rawNet string) string { + if idx := strings.IndexAny(rawNet, "/"); idx >= 0 { + return rawNet + } + + if idx := strings.IndexAny(rawNet, ":"); idx >= 0 { + return rawNet + "/128" + } + return rawNet + "/32" +} diff --git a/plugin/acl/setup_test.go b/plugin/acl/setup_test.go new file mode 100644 index 00000000000..f48da3f2466 --- /dev/null +++ b/plugin/acl/setup_test.go @@ -0,0 +1,245 @@ +package acl + +import ( + "testing" + + "github.com/caddyserver/caddy" +) + +func TestSetup(t *testing.T) { + tests := []struct { + name string + config string + wantErr bool + }{ + // IPv4 tests. 
+ { + "Blacklist 1", + `acl { + block type A net 192.168.0.0/16 + }`, + false, + }, + { + "Blacklist 2", + `acl { + block type * net 192.168.0.0/16 + }`, + false, + }, + { + "Blacklist 3", + `acl { + block type A net * + }`, + false, + }, + { + "Blacklist 4", + `acl { + allow type * net 192.168.1.0/24 + block type * net 192.168.0.0/16 + }`, + false, + }, + { + "Whitelist 1", + `acl { + allow type * net 192.168.0.0/16 + block type * net * + }`, + false, + }, + { + "fine-grained 1", + `acl a.example.org { + block type * net 192.168.1.0/24 + }`, + false, + }, + { + "fine-grained 2", + `acl a.example.org { + block type * net 192.168.1.0/24 + } + acl b.example.org { + block type * net 192.168.2.0/24 + }`, + false, + }, + { + "Multiple Networks 1", + `acl example.org { + block type * net 192.168.1.0/24 192.168.3.0/24 + }`, + false, + }, + { + "Multiple Qtypes 1", + `acl example.org { + block type TXT ANY CNAME net 192.168.3.0/24 + }`, + false, + }, + { + "Missing argument 1", + `acl { + block A net 192.168.0.0/16 + }`, + true, + }, + { + "Missing argument 2", + `acl { + block type net 192.168.0.0/16 + }`, + true, + }, + { + "Illegal argument 1", + `acl { + block type ABC net 192.168.0.0/16 + }`, + true, + }, + { + "Illegal argument 2", + `acl { + blck type A net 192.168.0.0/16 + }`, + true, + }, + { + "Illegal argument 3", + `acl { + block type A net 192.168.0/16 + }`, + true, + }, + { + "Illegal argument 4", + `acl { + block type A net 192.168.0.0/33 + }`, + true, + }, + // IPv6 tests. 
+ { + "Blacklist 1 IPv6", + `acl { + block type A net 2001:0db8:85a3:0000:0000:8a2e:0370:7334 + }`, + false, + }, + { + "Blacklist 2 IPv6", + `acl { + block type * net 2001:db8:85a3::8a2e:370:7334 + }`, + false, + }, + { + "Blacklist 3 IPv6", + `acl { + block type A + }`, + false, + }, + { + "Blacklist 4 IPv6", + `acl { + allow net 2001:db8:abcd:0012::0/64 + block net 2001:db8:abcd:0012::0/48 + }`, + false, + }, + { + "Whitelist 1 IPv6", + `acl { + allow net 2001:db8:abcd:0012::0/64 + block + }`, + false, + }, + { + "fine-grained 1 IPv6", + `acl a.example.org { + block net 2001:db8:abcd:0012::0/64 + }`, + false, + }, + { + "fine-grained 2 IPv6", + `acl a.example.org { + block net 2001:db8:abcd:0012::0/64 + } + acl b.example.org { + block net 2001:db8:abcd:0013::0/64 + }`, + false, + }, + { + "Multiple Networks 1 IPv6", + `acl example.org { + block net 2001:db8:abcd:0012::0/64 2001:db8:85a3::8a2e:370:7334/64 + }`, + false, + }, + { + "Illegal argument 1 IPv6", + `acl { + block type A net 2001::85a3::8a2e:370:7334 + }`, + true, + }, + { + "Illegal argument 2 IPv6", + `acl { + block type A net 2001:db8:85a3:::8a2e:370:7334 + }`, + true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctr := caddy.NewTestController("dns", tt.config) + if err := setup(ctr); (err != nil) != tt.wantErr { + t.Errorf("Error: setup() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} + +func TestNormalize(t *testing.T) { + type args struct { + rawNet string + } + tests := []struct { + name string + args args + want string + }{ + { + "Network range 1", + args{"10.218.10.8/24"}, + "10.218.10.8/24", + }, + { + "IP address 1", + args{"10.218.10.8"}, + "10.218.10.8/32", + }, + { + "IPv6 address 1", + args{"2001:0db8:85a3:0000:0000:8a2e:0370:7334"}, + "2001:0db8:85a3:0000:0000:8a2e:0370:7334/128", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := normalize(tt.args.rawNet); got != tt.want { + t.Errorf("Error: normalize() = 
%v, want %v", got, tt.want) + } + }) + } +} From d79562842aacfa5808ec0b8d160d720470ece060 Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Thu, 5 Sep 2019 03:06:10 +0100 Subject: [PATCH 163/324] plugin/acl: whitespace cleanup (#3239) OWNERS file was padded, remove empty lines. Fix code block in README, and set codeblock type to 'corefile' without the upper case C. Signed-off-by: Miek Gieben --- plugin/acl/OWNERS | 1 - plugin/acl/README.md | 10 +++++----- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/plugin/acl/OWNERS b/plugin/acl/OWNERS index 5921fce191b..f43391c4352 100644 --- a/plugin/acl/OWNERS +++ b/plugin/acl/OWNERS @@ -4,4 +4,3 @@ reviewers: approvers: - miekg - ihac - diff --git a/plugin/acl/README.md b/plugin/acl/README.md index 49b6895a920..fe5582a6af3 100644 --- a/plugin/acl/README.md +++ b/plugin/acl/README.md @@ -27,7 +27,7 @@ To demonstrate the usage of plugin acl, here we provide some typical examples. Block all DNS queries with record type A from 192.168.0.0/16: -~~~ Corefile +~~~ corefile . { acl { block type A net 192.168.0.0/16 @@ -37,18 +37,18 @@ Block all DNS queries with record type A from 192.168.0.0/16: Block all DNS queries from 192.168.0.0/16 except for 192.168.1.0/24: -~~~ Corefile +~~~ corefile . { acl { allow net 192.168.1.0/24 block net 192.168.0.0/16 } } -``` +~~~ Allow only DNS queries from 192.168.0.0/24 and 192.168.1.0/24: -~~~ Corefile +~~~ corefile . 
{ acl { allow net 192.168.0.0/16 192.168.1.0/24 @@ -59,7 +59,7 @@ Allow only DNS queries from 192.168.0.0/24 and 192.168.1.0/24: Block all DNS queries from 192.168.1.0/24 towards a.example.org: -~~~ Corefile +~~~ corefile example.org { acl a.example.org { block net 192.168.1.0/24 From 630d3d60b9fa3b9c1f070e4aa0504e50a85d70c2 Mon Sep 17 00:00:00 2001 From: Chris O'Haver Date: Thu, 5 Sep 2019 09:07:55 -0400 Subject: [PATCH 164/324] plugin/kubernetes: Handle multiple local IPs and bind (#3208) * use all local IPs * mult/bind ips * gofmt + boundIPs fix * fix no matching endpoint case * don't duplicate NS records in answer * fix answer dedup * fix comment * add multi local ip test case --- plugin/backend_lookup.go | 9 ++++- plugin/kubernetes/kubernetes.go | 10 ++--- plugin/kubernetes/kubernetes_apex_test.go | 4 +- plugin/kubernetes/kubernetes_test.go | 40 +++++++++++------- plugin/kubernetes/local.go | 49 ++++++++++++++--------- plugin/kubernetes/ns.go | 21 ++++++---- plugin/kubernetes/ns_test.go | 2 +- plugin/kubernetes/setup.go | 7 +++- 8 files changed, 91 insertions(+), 51 deletions(-) diff --git a/plugin/backend_lookup.go b/plugin/backend_lookup.go index 096cf806be7..9e5c9eeecbf 100644 --- a/plugin/backend_lookup.go +++ b/plugin/backend_lookup.go @@ -372,6 +372,8 @@ func NS(ctx context.Context, b ServiceBackend, zone string, state request.Reques // ... 
and reset state.Req.Question[0].Name = old + seen := map[string]bool{} + for _, serv := range services { what, ip := serv.HostType() switch what { @@ -380,8 +382,13 @@ func NS(ctx context.Context, b ServiceBackend, zone string, state request.Reques case dns.TypeA, dns.TypeAAAA: serv.Host = msg.Domain(serv.Key) - records = append(records, serv.NewNS(state.QName())) extra = append(extra, newAddress(serv, serv.Host, ip, what)) + ns := serv.NewNS(state.QName()) + if _, ok := seen[ns.Ns]; ok { + continue + } + seen[ns.Ns] = true + records = append(records, ns) } } return records, extra, nil diff --git a/plugin/kubernetes/kubernetes.go b/plugin/kubernetes/kubernetes.go index e7b36917e8d..68176d78c4d 100644 --- a/plugin/kubernetes/kubernetes.go +++ b/plugin/kubernetes/kubernetes.go @@ -43,11 +43,10 @@ type Kubernetes struct { Fall fall.F ttl uint32 opts dnsControlOpts - - primaryZoneIndex int - interfaceAddrsFunc func() net.IP - autoPathSearch []string // Local search path from /etc/resolv.conf. Needed for autopath. - TransferTo []string + primaryZoneIndex int + localIPs []net.IP + autoPathSearch []string // Local search path from /etc/resolv.conf. Needed for autopath. + TransferTo []string } // New returns a initialized Kubernetes. It default interfaceAddrFunc to return 127.0.0.1. 
All other @@ -56,7 +55,6 @@ func New(zones []string) *Kubernetes { k := new(Kubernetes) k.Zones = zones k.Namespaces = make(map[string]struct{}) - k.interfaceAddrsFunc = func() net.IP { return net.ParseIP("127.0.0.1") } k.podMode = podModeDisabled k.ttl = defaultTTL diff --git a/plugin/kubernetes/kubernetes_apex_test.go b/plugin/kubernetes/kubernetes_apex_test.go index 9a91ea2e2b8..5d2f4079b1b 100644 --- a/plugin/kubernetes/kubernetes_apex_test.go +++ b/plugin/kubernetes/kubernetes_apex_test.go @@ -2,6 +2,7 @@ package kubernetes import ( "context" + "net" "testing" "github.com/coredns/coredns/plugin/pkg/dnstest" @@ -63,6 +64,7 @@ func TestServeDNSApex(t *testing.T) { k := New([]string{"cluster.local."}) k.APIConn = &APIConnServeTest{} k.Next = test.NextHandler(dns.RcodeSuccess, nil) + k.localIPs = []net.IP{net.ParseIP("127.0.0.1")} ctx := context.TODO() for i, tc := range kubeApexCases { @@ -85,7 +87,7 @@ func TestServeDNSApex(t *testing.T) { } if err := test.SortAndCheck(resp, tc); err != nil { - t.Error(err) + t.Errorf("Test %d: %v", i, err) } } } diff --git a/plugin/kubernetes/kubernetes_test.go b/plugin/kubernetes/kubernetes_test.go index 4fd8e22a6ce..b259869a8b3 100644 --- a/plugin/kubernetes/kubernetes_test.go +++ b/plugin/kubernetes/kubernetes_test.go @@ -310,20 +310,28 @@ func TestServicesAuthority(t *testing.T) { key string } type svcTest struct { - interfaceAddrs func() net.IP - qname string - qtype uint16 - answer *svcAns + localIPs []net.IP + qname string + qtype uint16 + answer []svcAns } tests := []svcTest{ - {interfaceAddrs: func() net.IP { return net.ParseIP("127.0.0.1") }, qname: "ns.dns.interwebs.test.", qtype: dns.TypeA, answer: &svcAns{host: "127.0.0.1", key: "/" + coredns + "/test/interwebs/dns/ns"}}, - {interfaceAddrs: func() net.IP { return net.ParseIP("127.0.0.1") }, qname: "ns.dns.interwebs.test.", qtype: dns.TypeAAAA}, - {interfaceAddrs: func() net.IP { return net.ParseIP("::1") }, qname: "ns.dns.interwebs.test.", qtype: 
dns.TypeA}, - {interfaceAddrs: func() net.IP { return net.ParseIP("::1") }, qname: "ns.dns.interwebs.test.", qtype: dns.TypeAAAA, answer: &svcAns{host: "::1", key: "/" + coredns + "/test/interwebs/dns/ns"}}, + {localIPs: []net.IP{net.ParseIP("1.2.3.4")}, qname: "ns.dns.interwebs.test.", qtype: dns.TypeA, answer: []svcAns{{host: "1.2.3.4", key: "/" + coredns + "/test/interwebs/dns/ns"}}}, + {localIPs: []net.IP{net.ParseIP("1.2.3.4")}, qname: "ns.dns.interwebs.test.", qtype: dns.TypeAAAA}, + {localIPs: []net.IP{net.ParseIP("1:2::3:4")}, qname: "ns.dns.interwebs.test.", qtype: dns.TypeA}, + {localIPs: []net.IP{net.ParseIP("1:2::3:4")}, qname: "ns.dns.interwebs.test.", qtype: dns.TypeAAAA, answer: []svcAns{{host: "1:2::3:4", key: "/" + coredns + "/test/interwebs/dns/ns"}}}, + { + localIPs: []net.IP{net.ParseIP("1.2.3.4"), net.ParseIP("1:2::3:4")}, + qname: "ns.dns.interwebs.test.", + qtype: dns.TypeNS, answer: []svcAns{ + {host: "1.2.3.4", key: "/" + coredns + "/test/interwebs/dns/ns"}, + {host: "1:2::3:4", key: "/" + coredns + "/test/interwebs/dns/ns"}, + }, + }, } for i, test := range tests { - k.interfaceAddrsFunc = test.interfaceAddrs + k.localIPs = test.localIPs state := request.Request{ Req: &dns.Msg{Question: []dns.Question{{Name: test.qname, Qtype: test.qtype}}}, @@ -334,7 +342,7 @@ func TestServicesAuthority(t *testing.T) { t.Errorf("Test %d: got error '%v'", i, e) continue } - if test.answer != nil && len(svcs) != 1 { + if test.answer != nil && len(svcs) != len(test.answer) { t.Errorf("Test %d, expected 1 answer, got %v", i, len(svcs)) continue } @@ -347,11 +355,13 @@ func TestServicesAuthority(t *testing.T) { continue } - if test.answer.host != svcs[0].Host { - t.Errorf("Test %d, expected host '%v', got '%v'", i, test.answer.host, svcs[0].Host) - } - if test.answer.key != svcs[0].Key { - t.Errorf("Test %d, expected key '%v', got '%v'", i, test.answer.key, svcs[0].Key) + for i, answer := range test.answer { + if answer.host != svcs[i].Host { + t.Errorf("Test 
%d, expected host '%v', got '%v'", i, answer.host, svcs[i].Host) + } + if answer.key != svcs[i].Key { + t.Errorf("Test %d, expected key '%v', got '%v'", i, answer.key, svcs[i].Key) + } } } } diff --git a/plugin/kubernetes/local.go b/plugin/kubernetes/local.go index 199af6f0dba..d092550613c 100644 --- a/plugin/kubernetes/local.go +++ b/plugin/kubernetes/local.go @@ -2,41 +2,54 @@ package kubernetes import ( "net" + + "github.com/caddyserver/caddy" + "github.com/coredns/coredns/core/dnsserver" ) -func localPodIP() net.IP { - addrs, err := net.InterfaceAddrs() - if err != nil { - return nil +// boundIPs returns the list of non-loopback IPs that CoreDNS is bound to +func boundIPs(c *caddy.Controller) (ips []net.IP) { + conf := dnsserver.GetConfig(c) + hosts := conf.ListenHosts + if hosts == nil || hosts[0] == "" { + hosts = nil + addrs, err := net.InterfaceAddrs() + if err != nil { + return nil + } + for _, addr := range addrs { + hosts = append(hosts, addr.String()) + } } - - for _, addr := range addrs { - ip, _, _ := net.ParseCIDR(addr.String()) + for _, host := range hosts { + ip, _, _ := net.ParseCIDR(host) ip4 := ip.To4() if ip4 != nil && !ip4.IsLoopback() { - return ip4 + ips = append(ips, ip4) + continue } ip6 := ip.To16() if ip6 != nil && !ip6.IsLoopback() { - return ip6 + ips = append(ips, ip6) } } - return nil + return ips } // LocalNodeName is exclusively used in federation plugin, will be deprecated later. 
func (k *Kubernetes) LocalNodeName() string { - localIP := k.interfaceAddrsFunc() - if localIP == nil { + if len(k.localIPs) == 0 { return "" } - // Find endpoint matching localIP - for _, ep := range k.APIConn.EpIndexReverse(localIP.String()) { - for _, eps := range ep.Subsets { - for _, addr := range eps.Addresses { - if localIP.Equal(net.ParseIP(addr.IP)) { - return addr.NodeName + // Find fist endpoint matching any localIP + for _, localIP := range k.localIPs { + for _, ep := range k.APIConn.EpIndexReverse(localIP.String()) { + for _, eps := range ep.Subsets { + for _, addr := range eps.Addresses { + if localIP.Equal(net.ParseIP(addr.IP)) { + return addr.NodeName + } } } } diff --git a/plugin/kubernetes/ns.go b/plugin/kubernetes/ns.go index 4774e176f9a..f21890cc994 100644 --- a/plugin/kubernetes/ns.go +++ b/plugin/kubernetes/ns.go @@ -21,15 +21,10 @@ func (k *Kubernetes) nsAddrs(external bool, zone string) []dns.RR { svcIPs []net.IP ) - // Find the CoreDNS Endpoint - localIP := k.interfaceAddrsFunc() - endpoints := k.APIConn.EpIndexReverse(localIP.String()) + // Find the CoreDNS Endpoints + for _, localIP := range k.localIPs { + endpoints := k.APIConn.EpIndexReverse(localIP.String()) - // If the CoreDNS Endpoint is not found, use the locally bound IP address - if len(endpoints) == 0 { - svcNames = []string{defaultNSName + zone} - svcIPs = []net.IP{localIP} - } else { // Collect IPs for all Services of the Endpoints for _, endpoint := range endpoints { svcs := k.APIConn.SvcIndex(object.ServiceKey(endpoint.Name, endpoint.Namespace)) @@ -59,6 +54,16 @@ func (k *Kubernetes) nsAddrs(external bool, zone string) []dns.RR { } } + // If no local IPs matched any endpoints, use the localIPs directly + if len(svcIPs) == 0 { + svcIPs = make([]net.IP, len(k.localIPs)) + svcNames = make([]string, len(k.localIPs)) + for i, localIP := range k.localIPs { + svcNames[i] = defaultNSName + zone + svcIPs[i] = localIP + } + } + // Create an RR slice of collected IPs var rrs []dns.RR 
rrs = make([]dns.RR, len(svcIPs)) diff --git a/plugin/kubernetes/ns_test.go b/plugin/kubernetes/ns_test.go index af057c254db..5f9786a74ea 100644 --- a/plugin/kubernetes/ns_test.go +++ b/plugin/kubernetes/ns_test.go @@ -111,7 +111,7 @@ func TestNsAddrs(t *testing.T) { k := New([]string{"inter.webs.test."}) k.APIConn = &APIConnTest{} - k.interfaceAddrsFunc = func() net.IP { return net.ParseIP("10.244.0.20") } + k.localIPs = []net.IP{net.ParseIP("10.244.0.20")} cdrs := k.nsAddrs(false, k.Zones[0]) diff --git a/plugin/kubernetes/setup.go b/plugin/kubernetes/setup.go index eb33936fa77..1309139d738 100644 --- a/plugin/kubernetes/setup.go +++ b/plugin/kubernetes/setup.go @@ -61,6 +61,12 @@ func setup(c *caddy.Controller) error { return k }) + // get locally bound addresses + c.OnStartup(func() error { + k.localIPs = boundIPs(c) + return nil + }) + return nil } @@ -113,7 +119,6 @@ func kubernetesParse(c *caddy.Controller) (*Kubernetes, error) { func ParseStanza(c *caddy.Controller) (*Kubernetes, error) { k8s := New([]string{""}) - k8s.interfaceAddrsFunc = localPodIP k8s.autoPathSearch = searchFromResolvConf() opts := dnsControlOpts{ From 41115fb93d4f806d14ad04044ac8ee9d1820052e Mon Sep 17 00:00:00 2001 From: Cricket Liu Date: Thu, 5 Sep 2019 11:41:13 -0700 Subject: [PATCH 165/324] Update README.md (#3242) Minor textual cleanup. --- plugin/loadbalance/README.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/plugin/loadbalance/README.md b/plugin/loadbalance/README.md index b29ba4c08ec..81a6580c40f 100644 --- a/plugin/loadbalance/README.md +++ b/plugin/loadbalance/README.md @@ -2,14 +2,14 @@ ## Name -*loadbalance* - randomize the order of A, AAAA and MX records. +*loadbalance* - randomizes the order of A, AAAA and MX records. 
## Description -The *loadbalance* will act as a round-robin DNS loadbalancer by randomizing the order of A, AAAA, +The *loadbalance* will act as a round-robin DNS load balancer by randomizing the order of A, AAAA, and MX records in the answer. -See [Wikipedia](https://en.wikipedia.org/wiki/Round-robin_DNS) about the pros and cons on this +See [Wikipedia](https://en.wikipedia.org/wiki/Round-robin_DNS) about the pros and cons of this setup. It will take care to sort any CNAMEs before any address records, because some stub resolver implementations (like glibc) are particular about that. @@ -19,7 +19,7 @@ implementations (like glibc) are particular about that. loadbalance [POLICY] ~~~ -* **POLICY** is how to balance, the default, and only option, is "round_robin". +* **POLICY** is how to balance. The default, and only option, is "round_robin". ## Examples From e4df752b17a908c0cef9d3f91cb45856466a08dc Mon Sep 17 00:00:00 2001 From: Yong Tang Date: Fri, 6 Sep 2019 09:31:05 -0700 Subject: [PATCH 166/324] Update go mod tidy (and fix for Azure/go-autorest & azure-sdk-for-go) (#3258) See https://github.com/coredns/coredns/pull/3209#issuecomment-525016357 for the previous issues, also see https://github.com/Azure/go-autorest/issues/414 for some further background. Basically go mod has some issue when multiple tags exists (as main module and submodule) within the same repo. That was fixed in go-autorest very recently, though azure-sdk-for-go has not been fully updated yet. This fix is a temporarily one with `replace` fix. 
Once azure-sdk-for-go is updated then we could drop the `replace fix` Signed-off-by: Yong Tang --- go.mod | 11 ++++--- go.sum | 64 ++++++++++++----------------------------- plugin/chaos/zowners.go | 2 +- 3 files changed, 25 insertions(+), 52 deletions(-) diff --git a/go.mod b/go.mod index bfa3445762f..c85b9e0b745 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,6 @@ go 1.12 require ( cloud.google.com/go v0.41.0 // indirect github.com/Azure/azure-sdk-for-go v31.1.0+incompatible - github.com/Azure/go-autorest v13.0.0+incompatible // indirect github.com/Azure/go-autorest/autorest v0.9.0 github.com/Azure/go-autorest/autorest/azure/auth v0.3.0 github.com/Azure/go-autorest/autorest/to v0.2.0 // indirect @@ -14,10 +13,8 @@ require ( github.com/aws/aws-sdk-go v1.23.13 github.com/caddyserver/caddy v1.0.3 github.com/coredns/federation v0.0.0-20190818181423-e032b096babe - github.com/coreos/bbolt v1.3.2 // indirect github.com/coreos/go-systemd v0.0.0-20190212144455-93d5ec2c7f76 // indirect github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f // indirect - github.com/dgryski/go-metro v0.0.0-20180109044635-280f6062b5bc // indirect github.com/dnstap/golang-dnstap v0.0.0-20170829151710-2cf77a2b5e11 github.com/evanphx/json-patch v4.1.0+incompatible // indirect github.com/farsightsec/golang-framestream v0.0.0-20181102145529-8a0cb8ba8710 @@ -39,9 +36,8 @@ require ( github.com/prometheus/client_golang v1.1.0 github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 github.com/prometheus/common v0.6.0 - github.com/seiflotfy/cuckoofilter v0.0.0-20190302225222-764cb5258d9b github.com/sirupsen/logrus v1.4.2 // indirect - github.com/spf13/cobra v0.0.5 // indirect + github.com/spf13/pflag v1.0.3 // indirect github.com/tinylib/msgp v1.1.0 // indirect github.com/tmc/grpc-websocket-proxy 
v0.0.0-20190109142713-0ad062ec5ee5 // indirect go.etcd.io/etcd v0.0.0-20190823073701-67d0c21bb04c @@ -61,4 +57,7 @@ require ( k8s.io/utils v0.0.0-20190529001817-6999998975a7 // indirect ) -replace github.com/miekg/dns v1.1.3 => github.com/miekg/dns v1.1.16 +replace ( + github.com/Azure/go-autorest => github.com/Azure/go-autorest v13.0.0+incompatible + github.com/miekg/dns v1.1.3 => github.com/miekg/dns v1.1.16 +) diff --git a/go.sum b/go.sum index 155e15813d7..45db39e858b 100644 --- a/go.sum +++ b/go.sum @@ -5,8 +5,6 @@ cloud.google.com/go v0.41.0 h1:NFvqUTDnSNYPX5oReekmB+D+90jrJIcVImxQ3qrBVgM= cloud.google.com/go v0.41.0/go.mod h1:OauMR7DV8fzvZIl2qg6rkaIhD/vmgk4iwEw/h6ercmg= github.com/Azure/azure-sdk-for-go v31.1.0+incompatible h1:5SzgnfAvUBdBwNTN23WLfZoCt/rGhLvd7QdCAaFXgY4= github.com/Azure/azure-sdk-for-go v31.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/go-autorest v11.1.2+incompatible h1:viZ3tV5l4gE2Sw0xrasFHytCGtzYCrT+um/rrSQ1BfA= -github.com/Azure/go-autorest v11.1.2+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest v13.0.0+incompatible h1:56c11ykhsFSPNNQuS73Ri8h/ezqVhr2h6t9LJIEKVO0= github.com/Azure/go-autorest v13.0.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest/autorest v0.9.0 h1:MRvx8gncNaXJqOoLmhNjUAKh33JJF8LyxPhomEtOsjs= @@ -37,14 +35,12 @@ github.com/DataDog/zstd v1.3.5 h1:DtpNbljikUepEPD16hD4LvIcmhnhdLTiW/5pHgbmp14= github.com/DataDog/zstd v1.3.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= github.com/Shopify/sarama v1.21.0 h1:0GKs+e8mn1RRUzfg9oUXv3v7ZieQLmOZF/bfnmmGhM8= github.com/Shopify/sarama v1.21.0/go.mod h1:yuqtN/pe8cXRWG5zPaO7hCfNJp5MwmkoJEoLjkm5tCQ= +github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc= 
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/apache/thrift v0.12.0 h1:pODnxUFNcjP9UTLZGTdeh+j16A8lJbRvD3rOtrk/7bs= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= -github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/aws/aws-sdk-go v1.23.8 h1:G/azJoBN0pnhB3B+0eeC4yyVFYIIad6bbzg6wwtImqk= -github.com/aws/aws-sdk-go v1.23.8/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.23.13 h1:l/NG+mgQFRGG3dsFzEj0jw9JIs/zYdtU6MXhY1WIDmM= github.com/aws/aws-sdk-go v1.23.13/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= @@ -59,14 +55,10 @@ github.com/cenkalti/backoff v2.1.1+incompatible h1:tKJnvO2kl0zmb/jA5UKAt4VoEVw1q github.com/cenkalti/backoff v2.1.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cheekybits/genny v0.0.0-20170328200008-9127e812e1e9/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa h1:OaNxuTZr7kxeODyLWsRMC+OD03aFUH+mW6r2d+MWa5Y= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/coredns/federation v0.0.0-20190818181423-e032b096babe 
h1:ND08lR/TclI9W4dScCwdRESOacCCdF3FkuB5pBIOv1U= github.com/coredns/federation v0.0.0-20190818181423-e032b096babe/go.mod h1:MoqTEFX8GlnKkyq8eBCF94VzkNAOgjdlCJ+Pz/oCLPk= -github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= -github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/etcd v3.3.15+incompatible h1:+9RjdC18gMxNQVvSiXvObLu29mOFmkgdsB4cRTlV+EE= -github.com/coreos/etcd v3.3.15+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-semver v0.2.0 h1:3Jm3tLmsgAYcjC+4Up7hJrFBPr+n7rAqYeSw/SZazuY= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= @@ -75,7 +67,6 @@ github.com/coreos/go-systemd v0.0.0-20190212144455-93d5ec2c7f76/go.mod h1:F5haX7 github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= @@ -83,7 +74,6 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/dgrijalva/jwt-go 
v0.0.0-20160705203006-01aeca54ebda/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgryski/go-metro v0.0.0-20180109044635-280f6062b5bc/go.mod h1:c9O8+fpSOX1DM8cPNSkX/qsBWdkD4yd2dpciOWQjpBw= github.com/dimchansky/utfbom v1.1.0 h1:FcM3g+nofKgUteL8dm/UpdRXNC9KmADgTpLKsu0TRo4= github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= github.com/dnstap/golang-dnstap v0.0.0-20170829151710-2cf77a2b5e11 h1:m8nX8hsUghn853BJ5qB0lX+VvS6LTJPksWyILFZRYN4= @@ -107,6 +97,7 @@ github.com/farsightsec/golang-framestream v0.0.0-20181102145529-8a0cb8ba8710/go. github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 h1:BHsljHzVlRcyQhjrss6TZTdY2VfCqZPbv5k3iBFa2ZQ= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-acme/lego v2.5.0+incompatible h1:5fNN9yRQfv8ymH3DSsxla+4aYeQt2IgfZqHKVnK8f0s= @@ -121,6 +112,7 @@ github.com/gogo/protobuf v0.0.0-20171007142547-342cbe0a0415/go.mod h1:r8qH/GZQm5 github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/golang/glog 
v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef h1:veQD95Isof8w9/WXiA+pa3tz3fJXkt5B7QaRBrM62gk= @@ -136,6 +128,7 @@ github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db h1:woRePGFeVFfLKN/pO github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20160524151835-7d79101e329e/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= @@ -162,14 +155,11 @@ github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gregjones/httpcache v0.0.0-20170728041850-787624de3eb7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.0 h1:Iju5GlWwrvL6UBg4zJJt3btmonfrMlCDdsejg4CZE7c= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= 
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4 h1:z53tR0945TRRQO/fLEVPI6SMv7ZflF0TEaTAoU7tOzg= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.8.5 h1:2+KSC78XiO6Qy0hIjfc1OD9H+hsaJdJlb8Kqsd41CTE= -github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.9.5 h1:UImYN5qQ8tuGpGE16ZmjvcTtTw24zw1QAp/SlnNrZhI= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 h1:MJG/KsmcqMwFAkh8mTnAwhyKoB+sTAnY4CACC110tbU= github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= @@ -178,7 +168,6 @@ github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47/go.mod h1:/m3 github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= @@ -190,6 
+179,7 @@ github.com/infobloxopen/go-trees v0.0.0-20190313150506-2af4e13f9062/go.mod h1:Pc github.com/jimstudt/http-authentication v0.0.0-20140401203705-3eca13d6893a/go.mod h1:wK6yTYYcgjHE1Z1QtXACPDjcFJyBskHEdagmnq3vsP8= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/json-iterator/go v0.0.0-20180701071628-ab8a2e0c74be/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs= @@ -203,17 +193,20 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o github.com/klauspost/cpuid v1.2.0 h1:NMpwD2G9JSFOE1/TJjGSo5zG7Yb2bTe7eq1jH+irmeE= github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty 
v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/lucas-clemente/aes12 v0.0.0-20171027163421-cd47fb39b79f/go.mod h1:JpH9J1c9oX6otFSgdUHwUBUizmKlrMjxWnIAjff4m04= github.com/lucas-clemente/quic-clients v0.1.0/go.mod h1:y5xVIEoObKqULIKivu+gD/LU90pL73bTdtQjPBvtCBk= github.com/lucas-clemente/quic-go v0.10.2/go.mod h1:hvaRS9IHjFLMq76puFJeWNfmn+H70QZ/CXoxqw9bzao= github.com/lucas-clemente/quic-go-certificates v0.0.0-20160823095156-d2f86524cced/go.mod h1:NCcRLrOTZbzhZvixZLlERbJtDtYsmMw8Jc4vS8Z0g58= -github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/marten-seemann/qtls v0.2.3/go.mod h1:xzjG7avBwGGbdZ8dTGxlBnLArsVKLvwmjgmPuiQEcYk= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= @@ -226,7 +219,6 @@ github.com/miekg/dns v1.1.16 h1:iMEQ/IVHxPTtx2Q07JP/k4CKRvSjiAZjZ0hnhgYEDmE= github.com/miekg/dns v1.1.16/go.mod h1:YNV562EiewvSmpCB6/W4c6yqjK7Z+M/aIS1JHsIVeg8= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd 
h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -250,14 +242,15 @@ github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsq github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/openzipkin-contrib/zipkin-go-opentracing v0.3.5 h1:82Tnq9OJpn+h5xgGpss5/mOv3KXdjtkdorFSOUusjM8= github.com/openzipkin-contrib/zipkin-go-opentracing v0.3.5/go.mod h1:uVHyebswE1cCXr2A73cRM2frx5ld1RJUCJkFNZ90ZiI= -github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/philhofer/fwd v1.0.0 h1:UbZqGr5Y38ApvM/V/jEljVxwocdweyH+vmYvRPBnbqQ= github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= github.com/pierrec/lz4 v2.0.5+incompatible h1:2xWsjqPFWcplujydGg4WmhC/6fZqK42wMM8aXeqhl0I= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0 h1:vrDKnkGzuGvhNAL56c7DBz29ZL+KxnoR0x7enabFceM= @@ -280,44 +273,33 @@ github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a 
h1:9ZKAASQSHhD github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/russross/blackfriday v0.0.0-20170610170232-067529f716f4/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= -github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= -github.com/seiflotfy/cuckoofilter v0.0.0-20190302225222-764cb5258d9b/go.mod h1:ET5mVvNjwaGXRgZxO9UZr7X+8eAf87AfIYNwRSp9s4Y= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s= -github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= -github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= github.com/spf13/pflag v1.0.3/go.mod 
h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/tinylib/msgp v1.1.0 h1:9fQd+ICuRIu/ue4vxJZu6/LzxN0HwMds2nq/0cFvxHU= github.com/tinylib/msgp v1.1.0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 h1:LnC5Kc/wtumK+WB441p7ynQJzVuNRJiqddSIE3IlSEQ= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= -go.etcd.io/bbolt v1.3.2 h1:Z/90sZLPOeCy2PwprqkFa25PdkusRzaj9P8zm/KNyvk= -go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3 
h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/etcd v0.0.0-20190823073701-67d0c21bb04c h1:cmCkyKubPxaCrx7tK7JZbgVkzLIxUgsZyfkvnCUgs08= go.etcd.io/etcd v0.0.0-20190823073701-67d0c21bb04c/go.mod h1:tQYIqsNuGzkF9ncfEtoEX0qkoBhzw6ih5N1xcdGnvek= -go.etcd.io/etcd v3.3.15+incompatible h1:0VpOVCF6EFnJptt8Jh0EWEHO4j2fepyV1fpu9xz/UoQ= -go.etcd.io/etcd v3.3.15+incompatible/go.mod h1:yaeTdrJi5lOmYerz05bd8+V7KubZs8YSFZfzsF9A6aI= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= @@ -325,22 +307,17 @@ go.uber.org/atomic v1.3.2 h1:2Oa65PReHzfn29GpvgsYwloV9AVFHPDk8tYxt2c2tr4= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/zap v1.9.1 h1:XCJQEf3W6eZaVwhRBof6ImoYGJSITeKWsyeh3HFu/5o= -go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181001203147-e3636079e1a4/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181025213731-e84da0312774/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190123085648-057139ce5d2b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod 
h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190228161510-8dd112bcdc25/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190418165655-df01cb2cc480/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190618222545-ea8f1a30c443 h1:IcSOAf4PyMp3U3XbIEj1/xJ2BjNN2jWv7JoyOsMxXUU= -golang.org/x/crypto v0.0.0-20190618222545-ea8f1a30c443/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 h1:HuIa8hRrWRSrqYzx1qI49NNxhdi2PrY7gxVSq1JjLDc= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -369,11 +346,8 @@ golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7 h1:rTIdg5QFRR7XCaK4LCjBiPbx8j4DQRpdYMnGn/bJUEU= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net 
v0.0.0-20190813000000-74dc4d7220e7 h1:HOTjhHFecQCpFnEhQ4MAH6Gf7yGklZnqaHvtezTKqnQ= golang.org/x/net v0.0.0-20190813000000-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -393,7 +367,6 @@ golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20180928133829-e4b3c5e90611/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190124100055-b90733256f2e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -405,7 +378,6 @@ golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190618155005-516e3c20635f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0 h1:HyfiK1WMnHj5FXFXatD+Qs1A/xC2Run6RzeW1SyHxpc= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3 h1:4y9KwBHBgBNwDbtu44R5o1fdOCQUEXhbk/P4A9WmJq0= @@ -441,13 
+413,13 @@ google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEn google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1 h1:QzqyMA1tlu6CgqCDUtU9V+ZKhLFT2dkJuANu5QaxI3I= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190611190212-a7e196e89fd3/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= google.golang.org/genproto v0.0.0-20190626174449-989357319d63 h1:UsSJe9fhWNSz6emfIGPpH5DF23t7ALo2Pf3sC+/hsdg= google.golang.org/genproto v0.0.0-20190626174449-989357319d63/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= google.golang.org/genproto v0.0.0-20190701230453-710ae3a149df h1:k3DT34vxk64+4bD5x+fRy6U0SXxZehzUHRSYUJcKfII= @@ -461,6 +433,7 @@ gopkg.in/DataDog/dd-trace-go.v1 v1.16.1 h1:Dngw1zun6yTYFHNdzEWBlrJzFA2QJMjSA2sZ4 gopkg.in/DataDog/dd-trace-go.v1 v1.16.1/go.mod h1:DVp8HmDh8PuTu2Z0fVVlBsyWaC++fzwVCaGWylTe3tg= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 
v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= @@ -473,6 +446,7 @@ gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24 gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/square/go-jose.v2 v2.2.2 h1:orlkJ3myw8CN1nVQHBFfloD+L3egixIa4FvUP6RosSA= gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/plugin/chaos/zowners.go b/plugin/chaos/zowners.go index 41b8f690eec..c423fc0c4a9 100644 --- a/plugin/chaos/zowners.go +++ b/plugin/chaos/zowners.go @@ -1,4 +1,4 @@ package chaos // Owners are all GitHub handlers of all maintainers. 
-var Owners = []string{"bradbeam", "chrisohaver", "darshanime", "dilyevsky", "ekleiner", "fastest963", "greenpau", "grobie", "inigohu", "isolus", "johnbelamaric", "miekg", "nchrisdk", "nitisht", "pmoroney", "rajansandeep", "rdrozhdzh", "rtreffer", "stp-ip", "superq", "varyoo", "yongtang"} +var Owners = []string{"bradbeam", "chrisohaver", "darshanime", "dilyevsky", "ekleiner", "fastest963", "greenpau", "grobie", "ihac", "inigohu", "isolus", "johnbelamaric", "miekg", "nchrisdk", "nitisht", "pmoroney", "rajansandeep", "rdrozhdzh", "rtreffer", "stp-ip", "superq", "varyoo", "yongtang"} From c9e00b9edf5377fcb78be66e3e63408c806d70b5 Mon Sep 17 00:00:00 2001 From: Kasisnu Date: Sat, 7 Sep 2019 16:32:39 +0530 Subject: [PATCH 167/324] Pin golang.org/x/net in go.mod (#3260) Signed-off-by: kasisnu --- go.mod | 1 + go.sum | 2 ++ 2 files changed, 3 insertions(+) diff --git a/go.mod b/go.mod index c85b9e0b745..bae9155004c 100644 --- a/go.mod +++ b/go.mod @@ -60,4 +60,5 @@ require ( replace ( github.com/Azure/go-autorest => github.com/Azure/go-autorest v13.0.0+incompatible github.com/miekg/dns v1.1.3 => github.com/miekg/dns v1.1.16 + golang.org/x/net v0.0.0-20190813000000-74dc4d7220e7 => golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7 ) diff --git a/go.sum b/go.sum index 45db39e858b..c3ac73388d5 100644 --- a/go.sum +++ b/go.sum @@ -350,6 +350,8 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwL golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190813000000-74dc4d7220e7 h1:HOTjhHFecQCpFnEhQ4MAH6Gf7yGklZnqaHvtezTKqnQ= golang.org/x/net v0.0.0-20190813000000-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7 h1:fHDIZ2oxGnUZRN6WgWFCbYBjH9uqVPRCUVUDhs0wnbA= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= From ac2365da7762947ddd3247e566e04518aeacd527 Mon Sep 17 00:00:00 2001 From: Yong Tang Date: Sat, 7 Sep 2019 04:30:29 -0700 Subject: [PATCH 168/324] Bump Travis CI to use golang 1.13.x (#3262) Automatically submitted. --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index e29b074b35a..f7eea380091 100644 --- a/.travis.yml +++ b/.travis.yml @@ -3,7 +3,7 @@ services: - docker language: go go: - - "1.12.x" + - "1.13.x" cache: directories: From 878f906854ee23fd30531b360b1a10fccda3005c Mon Sep 17 00:00:00 2001 From: Cricket Liu Date: Sun, 8 Sep 2019 00:23:07 -0700 Subject: [PATCH 169/324] Making README consistent with other plugins' READMEs (#3243) --- plugin/sign/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugin/sign/README.md b/plugin/sign/README.md index 4753598d4b5..a28bf90d1e9 100644 --- a/plugin/sign/README.md +++ b/plugin/sign/README.md @@ -2,7 +2,7 @@ ## Name -*sign* - add DNSSEC records to zone files. +*sign* - adds DNSSEC records to zone files. ## Description From 31fd9328cd066877f451c6ea3918b86f6d002208 Mon Sep 17 00:00:00 2001 From: Cricket Liu Date: Sun, 8 Sep 2019 00:23:20 -0700 Subject: [PATCH 170/324] Making README consistent, fixing "meta data" (#3244) --- plugin/metadata/README.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/plugin/metadata/README.md b/plugin/metadata/README.md index e9ca22584d5..9a08bb91a1c 100644 --- a/plugin/metadata/README.md +++ b/plugin/metadata/README.md @@ -2,25 +2,25 @@ ## Name -*metadata* - enable a meta data collector. +*metadata* - enables a metadata collector. 
## Description By enabling *metadata* any plugin that implements [metadata.Provider interface](https://godoc.org/github.com/coredns/coredns/plugin/metadata#Provider) will be called for -each DNS query, at beginning of the process for that query, in order to add its own meta data to +each DNS query, at the beginning of the process for that query, in order to add its own metadata to context. -The meta data collected will be available for all plugins, via the Context parameter provided in the +The metadata collected will be available for all plugins, via the Context parameter provided in the ServeDNS function. The package (code) documentation has examples on how to inspect and retrieve metadata a plugin might be interested in. -The meta data is added by setting a label with a value in the context. These labels should be named +The metadata is added by setting a label with a value in the context. These labels should be named `plugin/NAME`, where **NAME** is something descriptive. The only hard requirement the *metadata* -plugin enforces is that the labels contains a slash. See the documentation for +plugin enforces is that the labels contain a slash. See the documentation for `metadata.SetValueFunc`. -The value stored is a string. The empty string signals "no meta data". See the documentation for +The value stored is a string. The empty string signals "no metadata". See the documentation for `metadata.ValueFunc` on how to retrieve this. 
## Syntax From db489056b84717049c4d211e5152b39dea578566 Mon Sep 17 00:00:00 2001 From: Cricket Liu Date: Sun, 8 Sep 2019 00:23:38 -0700 Subject: [PATCH 171/324] Making README consistent with other plugins' READMEs (#3245) --- plugin/loop/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugin/loop/README.md b/plugin/loop/README.md index eb3a1ee90fc..b0fb4835d4c 100644 --- a/plugin/loop/README.md +++ b/plugin/loop/README.md @@ -2,7 +2,7 @@ ## Name -*loop* - detect simple forwarding loops and halt the server. +*loop* - detects simple forwarding loops and halts the server. ## Description From 47b8ce1fffb0999498b4f8d776834ea7592e1fa7 Mon Sep 17 00:00:00 2001 From: Cricket Liu Date: Sun, 8 Sep 2019 00:23:54 -0700 Subject: [PATCH 172/324] Fixing the top-level description (#3246) --- plugin/kubernetes/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugin/kubernetes/README.md b/plugin/kubernetes/README.md index 15af2565c91..c844bc0c2de 100644 --- a/plugin/kubernetes/README.md +++ b/plugin/kubernetes/README.md @@ -2,7 +2,7 @@ ## Name -*kubernetes* - enables the reading zone data from a Kubernetes cluster. +*kubernetes* - enables reading zone data from a Kubernetes cluster. ## Description From ea66273463064c28bdbc91b58a1354ac3ec4b8b2 Mon Sep 17 00:00:00 2001 From: Cricket Liu Date: Sun, 8 Sep 2019 00:24:43 -0700 Subject: [PATCH 173/324] Making README consistent with other plugins' READMEs (#3247) --- plugin/k8s_external/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugin/k8s_external/README.md b/plugin/k8s_external/README.md index 5960265236e..dc251ab02ff 100644 --- a/plugin/k8s_external/README.md +++ b/plugin/k8s_external/README.md @@ -2,7 +2,7 @@ ## Name -*k8s_external* - resolve load balancer and external IPs from outside kubernetes clusters. +*k8s_external* - resolves load balancer and external IPs from outside Kubernetes clusters. 
## Description From 782994583beb779c28c53a9170e4952f352d9084 Mon Sep 17 00:00:00 2001 From: Cricket Liu Date: Sun, 8 Sep 2019 00:25:17 -0700 Subject: [PATCH 174/324] Making README consistent with other plugins' READMEs (#3248) --- plugin/import/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugin/import/README.md b/plugin/import/README.md index 7c6a66999d3..76906d9fa4b 100644 --- a/plugin/import/README.md +++ b/plugin/import/README.md @@ -2,11 +2,11 @@ ## Name -*import* - include files or reference snippets from a Corefile. +*import* - includes files or references snippets from a Corefile. ## Description -The *import* plugin can be used to include files into the main configuration. Another use it to +The *import* plugin can be used to include files into the main configuration. Another use is to reference predefined snippets. Both can help to avoid some duplication. This is a unique directive in that *import* can appear outside of a server block. In other words, it From 533994959601e99f680eac56ec89a2bd93f6fc3a Mon Sep 17 00:00:00 2001 From: Cricket Liu Date: Sun, 8 Sep 2019 00:25:26 -0700 Subject: [PATCH 175/324] Making README consistent with other plugins' READMEs (#3249) --- plugin/etcd/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugin/etcd/README.md b/plugin/etcd/README.md index a7f44d53910..3b909fcac3d 100644 --- a/plugin/etcd/README.md +++ b/plugin/etcd/README.md @@ -2,7 +2,7 @@ ## Name -*etcd* - enable SkyDNS service discovery from etcd. +*etcd* - enables SkyDNS service discovery from etcd. ## Description @@ -10,7 +10,7 @@ The *etcd* plugin implements the (older) SkyDNS service discovery service. It is a generic DNS zone data plugin. Only a subset of DNS record types are implemented, and subdomains and delegations are not handled at all. 
-The data in etcd instance has to be encoded as +The data in the etcd instance has to be encoded as a [message](https://github.com/skynetservices/skydns/blob/2fcff74cdc9f9a7dd64189a447ef27ac354b725f/msg/service.go#L26) like [SkyDNS](https://github.com/skynetservices/skydns). It works just like SkyDNS. From 9dc2e3162c259bc980a6fec23ed10892d4590b7e Mon Sep 17 00:00:00 2001 From: Cricket Liu Date: Sun, 8 Sep 2019 00:25:35 -0700 Subject: [PATCH 176/324] Making README consistent with other plugins' READMEs (#3250) --- plugin/errors/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugin/errors/README.md b/plugin/errors/README.md index 8b1449ea308..5feef2694a9 100644 --- a/plugin/errors/README.md +++ b/plugin/errors/README.md @@ -2,7 +2,7 @@ ## Name -*errors* - enable error logging. +*errors* - enables error logging. ## Description From 9b192564ac43160eb0a20481a182feae768850e2 Mon Sep 17 00:00:00 2001 From: Cricket Liu Date: Sun, 8 Sep 2019 00:25:58 -0700 Subject: [PATCH 177/324] Making README consistent with other plugins' READMEs (#3251) --- plugin/dnstap/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugin/dnstap/README.md b/plugin/dnstap/README.md index 81152e7422b..afcd19bf272 100644 --- a/plugin/dnstap/README.md +++ b/plugin/dnstap/README.md @@ -2,11 +2,11 @@ ## Name -*dnstap* - enable logging to dnstap. +*dnstap* - enables logging to dnstap. ## Description -dnstap is a flexible, structured binary log format for DNS software: http://dnstap.info. With this +dnstap is a flexible, structured binary log format for DNS software; see http://dnstap.info. With this plugin you make CoreDNS output dnstap logging. 
Note that there is an internal buffer, so expect at least 13 requests before the server sends its From b156e0440f81d3648ec08960de1371b257b18c73 Mon Sep 17 00:00:00 2001 From: Cricket Liu Date: Sun, 8 Sep 2019 00:26:06 -0700 Subject: [PATCH 178/324] Making dnssec's README consistent with other plugins' READMEs (#3252) --- plugin/dnssec/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugin/dnssec/README.md b/plugin/dnssec/README.md index c8e17bf4f73..abb222dd786 100644 --- a/plugin/dnssec/README.md +++ b/plugin/dnssec/README.md @@ -2,11 +2,11 @@ ## Name -*dnssec* - enable on-the-fly DNSSEC signing of served data. +*dnssec* - enables on-the-fly DNSSEC signing of served data. ## Description -With *dnssec* any reply that doesn't (or can't) do DNSSEC will get signed on the fly. Authenticated +With *dnssec*, any reply that doesn't (or can't) do DNSSEC will get signed on the fly. Authenticated denial of existence is implemented with NSEC black lies. Using ECDSA as an algorithm is preferred as this leads to smaller signatures (compared to RSA). NSEC3 is *not* supported. From ab59b28c4fde2b93ee73dac0edec0724d41aa21d Mon Sep 17 00:00:00 2001 From: Cricket Liu Date: Sun, 8 Sep 2019 00:26:16 -0700 Subject: [PATCH 179/324] Making cancel's README consistent with other plugins' READMEs (#3253) --- plugin/cancel/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugin/cancel/README.md b/plugin/cancel/README.md index 1615f640944..9f7c470d940 100644 --- a/plugin/cancel/README.md +++ b/plugin/cancel/README.md @@ -2,14 +2,14 @@ ## Name -*cancel* - a plugin that cancels a request's context after 5001 milliseconds. +*cancel* - cancels a request's context after 5001 milliseconds. ## Description The *cancel* plugin creates a canceling context for each request. It adds a timeout that gets triggered after 5001 milliseconds. 
-The 5001 number is chosen because the default timeout for DNS clients is 5 seconds, after that they +The 5001 number was chosen because the default timeout for DNS clients is 5 seconds, after that they give up. A plugin interested in the cancellation status should call `plugin.Done()` on the context. If the From acabfc5e9eb1443709352571ebf55880e94cfa8a Mon Sep 17 00:00:00 2001 From: Cricket Liu Date: Sun, 8 Sep 2019 00:26:25 -0700 Subject: [PATCH 180/324] Making README text consistent with other plugins' READMEs (#3254) --- plugin/any/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugin/any/README.md b/plugin/any/README.md index 1abc978030e..5b88467459d 100644 --- a/plugin/any/README.md +++ b/plugin/any/README.md @@ -3,7 +3,7 @@ ## Name -*any* - give a minimal response to ANY queries. +*any* - gives a minimal response to ANY queries. ## Description From 70f2bd1dfffcb8ab725dd459daa6ec11a4269391 Mon Sep 17 00:00:00 2001 From: Chris O'Haver Date: Sun, 8 Sep 2019 03:28:30 -0400 Subject: [PATCH 181/324] dont duplicate service recrod for every port (#3240) --- plugin/kubernetes/xfr.go | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/plugin/kubernetes/xfr.go b/plugin/kubernetes/xfr.go index 7ea99fd8daa..9ad6c9aca46 100644 --- a/plugin/kubernetes/xfr.go +++ b/plugin/kubernetes/xfr.go @@ -110,14 +110,15 @@ func (k *Kubernetes) transfer(c chan dns.RR, zone string) { case api.ServiceTypeClusterIP, api.ServiceTypeNodePort, api.ServiceTypeLoadBalancer: clusterIP := net.ParseIP(svc.ClusterIP) if clusterIP != nil { - for _, p := range svc.Ports { + s := msg.Service{Host: svc.ClusterIP, TTL: k.ttl} + s.Key = strings.Join(svcBase, "/") - s := msg.Service{Host: svc.ClusterIP, Port: int(p.Port), TTL: k.ttl} - s.Key = strings.Join(svcBase, "/") + // Change host from IP to Name for SRV records + host := emitAddressRecord(c, s) - // Change host from IP to Name for SRV records - host := emitAddressRecord(c, s) - s.Host = host 
+ for _, p := range svc.Ports { + s := msg.Service{Host: host, Port: int(p.Port), TTL: k.ttl} + s.Key = strings.Join(svcBase, "/") // Need to generate this to handle use cases for peer-finder // ref: https://github.com/coredns/coredns/pull/823 From dfb9c9516f6f6f0799bbacb5c06a45e0788a575d Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 9 Sep 2019 09:52:38 +0000 Subject: [PATCH 182/324] build(deps): bump github.com/aws/aws-sdk-go from 1.23.13 to 1.23.17 (#3266) Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.23.13 to 1.23.17. - [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Changelog](https://github.com/aws/aws-sdk-go/blob/master/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.23.13...v1.23.17) Signed-off-by: dependabot-preview[bot] --- go.mod | 2 +- go.sum | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index bae9155004c..3f68e79726c 100644 --- a/go.mod +++ b/go.mod @@ -10,7 +10,7 @@ require ( github.com/Azure/go-autorest/autorest/to v0.2.0 // indirect github.com/Shopify/sarama v1.21.0 // indirect github.com/apache/thrift v0.12.0 // indirect - github.com/aws/aws-sdk-go v1.23.13 + github.com/aws/aws-sdk-go v1.23.17 github.com/caddyserver/caddy v1.0.3 github.com/coredns/federation v0.0.0-20190818181423-e032b096babe github.com/coreos/go-systemd v0.0.0-20190212144455-93d5ec2c7f76 // indirect diff --git a/go.sum b/go.sum index c3ac73388d5..db31db43249 100644 --- a/go.sum +++ b/go.sum @@ -43,6 +43,8 @@ github.com/apache/thrift v0.12.0 h1:pODnxUFNcjP9UTLZGTdeh+j16A8lJbRvD3rOtrk/7bs= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/aws/aws-sdk-go v1.23.13 h1:l/NG+mgQFRGG3dsFzEj0jw9JIs/zYdtU6MXhY1WIDmM= 
github.com/aws/aws-sdk-go v1.23.13/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.23.17 h1:IGNAvtR7ckMEHhy+ObG9xw6DFqEE4Ual0LYXsVTZSLQ= +github.com/aws/aws-sdk-go v1.23.17/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= From 37f5d489c6fc7d53f67b75a0fa992485bb73ed07 Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 9 Sep 2019 09:52:48 +0000 Subject: [PATCH 183/324] build(deps): bump github.com/Azure/go-autorest/autorest (#3268) Bumps [github.com/Azure/go-autorest/autorest](https://github.com/Azure/go-autorest) from 0.9.0 to 0.9.1. 
- [Release notes](https://github.com/Azure/go-autorest/releases) - [Changelog](https://github.com/Azure/go-autorest/blob/master/CHANGELOG.md) - [Commits](https://github.com/Azure/go-autorest/compare/autorest/v0.9.0...autorest/v0.9.1) Signed-off-by: dependabot-preview[bot] --- go.mod | 2 +- go.sum | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index 3f68e79726c..a09c9ce5074 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ go 1.12 require ( cloud.google.com/go v0.41.0 // indirect github.com/Azure/azure-sdk-for-go v31.1.0+incompatible - github.com/Azure/go-autorest/autorest v0.9.0 + github.com/Azure/go-autorest/autorest v0.9.1 github.com/Azure/go-autorest/autorest/azure/auth v0.3.0 github.com/Azure/go-autorest/autorest/to v0.2.0 // indirect github.com/Shopify/sarama v1.21.0 // indirect diff --git a/go.sum b/go.sum index db31db43249..24aef434139 100644 --- a/go.sum +++ b/go.sum @@ -9,6 +9,8 @@ github.com/Azure/go-autorest v13.0.0+incompatible h1:56c11ykhsFSPNNQuS73Ri8h/ezq github.com/Azure/go-autorest v13.0.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest/autorest v0.9.0 h1:MRvx8gncNaXJqOoLmhNjUAKh33JJF8LyxPhomEtOsjs= github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest v0.9.1 h1:JB7Mqhna/7J8gZfVHjxDSTLSD6ciz2YgSMb/4qLXTtY= +github.com/Azure/go-autorest/autorest v0.9.1/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= github.com/Azure/go-autorest/autorest/adal v0.6.0 h1:UCTq22yE3RPgbU/8u4scfnnzuCW6pwQ9n+uBtV78ouo= github.com/Azure/go-autorest/autorest/adal v0.6.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= From 
d3403ff31179981bae145da4f8e6861bd278ccd5 Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 9 Sep 2019 09:52:59 +0000 Subject: [PATCH 184/324] build(deps): bump google.golang.org/api from 0.9.0 to 0.10.0 (#3267) Bumps [google.golang.org/api](https://github.com/google/google-api-go-client) from 0.9.0 to 0.10.0. - [Release notes](https://github.com/google/google-api-go-client/releases) - [Changelog](https://github.com/googleapis/google-api-go-client/blob/master/CHANGES.md) - [Commits](https://github.com/google/google-api-go-client/compare/v0.9.0...v0.10.0) Signed-off-by: dependabot-preview[bot] --- go.mod | 2 +- go.sum | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index a09c9ce5074..05f351ff2d9 100644 --- a/go.mod +++ b/go.mod @@ -44,7 +44,7 @@ require ( golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 // indirect - google.golang.org/api v0.9.0 + google.golang.org/api v0.10.0 google.golang.org/genproto v0.0.0-20190701230453-710ae3a149df // indirect google.golang.org/grpc v1.23.0 gopkg.in/DataDog/dd-trace-go.v1 v1.16.1 diff --git a/go.sum b/go.sum index 24aef434139..24be5d5688a 100644 --- a/go.sum +++ b/go.sum @@ -416,6 +416,8 @@ google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEt google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.9.0 h1:jbyannxz0XFD3zdjgrSUsaJbgpH4eTrkdhRChkHPfO8= google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.10.0 h1:7tmAxx3oKE98VMZ+SBZzvYYWRQ9HODBxmC8mXUsraSQ= +google.golang.org/api v0.10.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= 
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= From 368440d6b20c3663f6afc2eb2495e08733960a54 Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 9 Sep 2019 09:53:34 +0000 Subject: [PATCH 185/324] build(deps): bump gopkg.in/DataDog/dd-trace-go.v1 from 1.16.1 to 1.17.0 (#3265) Bumps [gopkg.in/DataDog/dd-trace-go.v1](https://github.com/DataDog/dd-trace-go) from 1.16.1 to 1.17.0. - [Release notes](https://github.com/DataDog/dd-trace-go/releases) - [Commits](https://github.com/DataDog/dd-trace-go/compare/v1.16.1...v1.17.0) Signed-off-by: dependabot-preview[bot] --- go.mod | 2 +- go.sum | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index 05f351ff2d9..13557c537ba 100644 --- a/go.mod +++ b/go.mod @@ -47,7 +47,7 @@ require ( google.golang.org/api v0.10.0 google.golang.org/genproto v0.0.0-20190701230453-710ae3a149df // indirect google.golang.org/grpc v1.23.0 - gopkg.in/DataDog/dd-trace-go.v1 v1.16.1 + gopkg.in/DataDog/dd-trace-go.v1 v1.17.0 gopkg.in/inf.v0 v0.9.1 // indirect k8s.io/api v0.0.0-20190620084959-7cf5895f2711 k8s.io/apimachinery v0.0.0-20190612205821-1799e75a0719 diff --git a/go.sum b/go.sum index 24be5d5688a..e7f7ad092e4 100644 --- a/go.sum +++ b/go.sum @@ -439,6 +439,8 @@ google.golang.org/grpc v1.23.0 h1:AzbTB6ux+okLTzP8Ru1Xs41C303zdcfEht7MQnYJt5A= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= gopkg.in/DataDog/dd-trace-go.v1 v1.16.1 h1:Dngw1zun6yTYFHNdzEWBlrJzFA2QJMjSA2sZ4nH2UWo= gopkg.in/DataDog/dd-trace-go.v1 v1.16.1/go.mod h1:DVp8HmDh8PuTu2Z0fVVlBsyWaC++fzwVCaGWylTe3tg= +gopkg.in/DataDog/dd-trace-go.v1 v1.17.0 h1:j9vAp9Re9bbtA/QFehkJpNba/6W2IbJtNuXZophCa54= +gopkg.in/DataDog/dd-trace-go.v1 v1.17.0/go.mod h1:DVp8HmDh8PuTu2Z0fVVlBsyWaC++fzwVCaGWylTe3tg= 
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= From be923eaf9bba15af46d36953d250cbfa546de1ec Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Mon, 9 Sep 2019 14:41:50 +0100 Subject: [PATCH 186/324] default config: add log (#3269) When there is no Corefile found we load the default. Add the log plugin to it, so you can see queries actually landing in CoreDNS. Signed-off-by: Miek Gieben --- core/dnsserver/register.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/dnsserver/register.go b/core/dnsserver/register.go index 4b2cd95fc8f..27144385700 100644 --- a/core/dnsserver/register.go +++ b/core/dnsserver/register.go @@ -28,7 +28,7 @@ func init() { DefaultInput: func() caddy.Input { return caddy.CaddyfileInput{ Filepath: "Corefile", - Contents: []byte(".:" + Port + " {\nwhoami\n}\n"), + Contents: []byte(".:" + Port + " {\nwhoami\nlog\n}\n"), ServerTypeName: serverType, } }, From 6579d60b089b7eb84067445434ac7f956850332d Mon Sep 17 00:00:00 2001 From: Yong Tang Date: Mon, 9 Sep 2019 23:22:43 -0700 Subject: [PATCH 187/324] Fix RFC link for RFC8484 (#3271) This fix fixes 3270 Signed-off-by: Yong Tang --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 11057354d4c..d2508c830f8 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ provided out of the box you can add it by [writing a plugin](https://coredns.io/ CoreDNS can listen for DNS requests coming in over UDP/TCP (go'old DNS), TLS ([RFC 7858](https://tools.ietf.org/html/rfc7858)), also called DoT, DNS over HTTP/2 - DoH - -([RFC 8484](https://tools.ietf.org/html/rfc7858)) and [gRPC](https://grpc.io) (not a standard). 
+([RFC 8484](https://tools.ietf.org/html/rfc8484)) and [gRPC](https://grpc.io) (not a standard). Currently CoreDNS is able to: From a52a427b4ae12023142a389df65d6c7ef9864a94 Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Tue, 10 Sep 2019 11:41:56 +0100 Subject: [PATCH 188/324] stop fuzzing: its build is broken (#3272) Automatically submitted. --- .travis.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.travis.yml b/.travis.yml index f7eea380091..e10ff20c5ee 100644 --- a/.travis.yml +++ b/.travis.yml @@ -27,8 +27,8 @@ env: - TEST_TYPE=integration - TEST_TYPE=core - TEST_TYPE=plugin - - TEST_TYPE=fuzzit FUZZIT_TYPE=local-regression - - TEST_TYPE=fuzzit FUZZIT_TYPE=fuzzing +# - TEST_TYPE=fuzzit FUZZIT_TYPE=local-regression +# - TEST_TYPE=fuzzit FUZZIT_TYPE=fuzzing # In the Travis VM-based build environment, IPv6 networking is not # enabled by default. The sysctl operations below enable IPv6. From ac36144f0730b84530814ba08d2c84ed8a508e1b Mon Sep 17 00:00:00 2001 From: Chris O'Haver Date: Tue, 10 Sep 2019 11:34:08 -0400 Subject: [PATCH 189/324] circleci: updates for minikube -> kind transition (#3274) * updates for minikube -> kind transition Signed-off-by: Chris O'Haver * update container build Signed-off-by: Chris O'Haver --- .circleci/config.yml | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 79f6d2e47c5..c0735ddc5f1 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -20,18 +20,14 @@ integrationDefaults: &integrationDefaults image: ubuntu-1604:201903-01 working_directory: ~/go/src/${CIRCLE_PROJECT_USERNAME}/coredns environment: - - K8S_VERSION: v1.13.3 - - KUBECONFIG: /home/circleci/.kube/config - - MINIKUBE_VERSION: v0.33.1 - - MINIKUBE_WANTUPDATENOTIFICATION: false - - MINIKUBE_WANTREPORTERRORPROMPT: false - - CHANGE_MINIKUBE_NONE_USER: true - - MINIKUBE_HOME: /home/circleci + - K8S_VERSION: v1.15.3 + - KIND_VERSION: v0.5.1 + - KUBECONFIG: 
/home/circleci/.kube/kind-config-kind setupKubernetes: &setupKubernetes - run: name: Setup Kubernetes - command: ~/go/src/${CIRCLE_PROJECT_USERNAME}/ci/build/kubernetes/minikube_setup.sh + command: ~/go/src/${CIRCLE_PROJECT_USERNAME}/ci/build/kubernetes/k8s_setup.sh buildCoreDNSImage: &buildCoreDNSImage - run: @@ -40,8 +36,7 @@ buildCoreDNSImage: &buildCoreDNSImage cd ~/go/src/${CIRCLE_PROJECT_USERNAME}/coredns make coredns SYSTEM="GOOS=linux" && \ docker build -t coredns . && \ - docker tag coredns localhost:5000/coredns && \ - docker push localhost:5000/coredns + kind load docker-image coredns jobs: kubernetes-tests: From 88d25cdc2038f06d6e80ba637a16ff806c390ede Mon Sep 17 00:00:00 2001 From: yeya24 Date: Mon, 16 Sep 2019 02:28:42 -0400 Subject: [PATCH 190/324] remove an unused variable (#3278) Signed-off-by: yeya24 --- plugin/health/overloaded.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/plugin/health/overloaded.go b/plugin/health/overloaded.go index 04d6a5f26c5..1c6602b825b 100644 --- a/plugin/health/overloaded.go +++ b/plugin/health/overloaded.go @@ -2,7 +2,6 @@ package health import ( "net/http" - "sync" "time" "github.com/coredns/coredns/plugin" @@ -48,5 +47,3 @@ var ( Help: "Histogram of the time (in seconds) each request took.", }) ) - -var once sync.Once From 7e6e9175cfb16c69d8d8b4e2368390576adda739 Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 16 Sep 2019 09:53:20 +0000 Subject: [PATCH 191/324] build(deps): bump github.com/prometheus/common from 0.6.0 to 0.7.0 (#3281) Bumps [github.com/prometheus/common](https://github.com/prometheus/common) from 0.6.0 to 0.7.0. 
- [Release notes](https://github.com/prometheus/common/releases) - [Commits](https://github.com/prometheus/common/compare/v0.6.0...v0.7.0) Signed-off-by: dependabot-preview[bot] --- go.mod | 2 +- go.sum | 5 +++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index 13557c537ba..718fbc99ae5 100644 --- a/go.mod +++ b/go.mod @@ -35,7 +35,7 @@ require ( github.com/pkg/errors v0.8.1 // indirect github.com/prometheus/client_golang v1.1.0 github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 - github.com/prometheus/common v0.6.0 + github.com/prometheus/common v0.7.0 github.com/sirupsen/logrus v1.4.2 // indirect github.com/spf13/pflag v1.0.3 // indirect github.com/tinylib/msgp v1.1.0 // indirect diff --git a/go.sum b/go.sum index e7f7ad092e4..9bb528dbf42 100644 --- a/go.sum +++ b/go.sum @@ -40,7 +40,9 @@ github.com/Shopify/sarama v1.21.0/go.mod h1:yuqtN/pe8cXRWG5zPaO7hCfNJp5MwmkoJEoL github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/apache/thrift v0.12.0 h1:pODnxUFNcjP9UTLZGTdeh+j16A8lJbRvD3rOtrk/7bs= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/aws/aws-sdk-go v1.23.13 
h1:l/NG+mgQFRGG3dsFzEj0jw9JIs/zYdtU6MXhY1WIDmM= @@ -107,6 +109,7 @@ github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeME github.com/go-acme/lego v2.5.0+incompatible h1:5fNN9yRQfv8ymH3DSsxla+4aYeQt2IgfZqHKVnK8f0s= github.com/go-acme/lego v2.5.0+incompatible/go.mod h1:yzMNe9CasVUhkquNvti5nAtPmG94USbYxYrZfTkIn0M= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0 h1:MP4Eh7ZCb31lleYCFuwm0oe4/YGak+5l1vA2NOE80nA= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= @@ -268,6 +271,8 @@ github.com/prometheus/common v0.4.1 h1:K0MGApIoQvMw27RTdJkPbr3JZ7DNbtxQNyi5STVM6 github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.6.0 h1:kRhiuYSXR3+uv2IbVbZhUxK5zVD/2pp3Gd2PpvPkpEo= github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= +github.com/prometheus/common v0.7.0 h1:L+1lyG48J1zAQXA3RBX/nG/B3gjlHq0zTt2tlbJLyCY= +github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= From fadc75454a624f9df0f5c7d07f850d2be18b6be3 Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 16 Sep 2019 10:12:10 +0000 Subject: [PATCH 192/324] build(deps): bump 
github.com/miekg/dns from 1.1.16 to 1.1.17 (#3280) * build(deps): bump github.com/miekg/dns from 1.1.16 to 1.1.17 Bumps [github.com/miekg/dns](https://github.com/miekg/dns) from 1.1.16 to 1.1.17. - [Release notes](https://github.com/miekg/dns/releases) - [Changelog](https://github.com/miekg/dns/blob/master/Makefile.release) - [Commits](https://github.com/miekg/dns/compare/v1.1.16...v1.1.17) Signed-off-by: dependabot-preview[bot] * Update go.mod --- go.mod | 4 ++-- go.sum | 9 +++++++++ 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index 718fbc99ae5..a543f13c4ea 100644 --- a/go.mod +++ b/go.mod @@ -27,7 +27,7 @@ require ( github.com/infobloxopen/go-trees v0.0.0-20190313150506-2af4e13f9062 github.com/konsorten/go-windows-terminal-sequences v1.0.2 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.1 - github.com/miekg/dns v1.1.16 + github.com/miekg/dns v1.1.17 github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492 // indirect github.com/opentracing/opentracing-go v1.1.0 github.com/openzipkin-contrib/zipkin-go-opentracing v0.3.5 @@ -59,6 +59,6 @@ require ( replace ( github.com/Azure/go-autorest => github.com/Azure/go-autorest v13.0.0+incompatible - github.com/miekg/dns v1.1.3 => github.com/miekg/dns v1.1.16 + github.com/miekg/dns v1.1.3 => github.com/miekg/dns v1.1.17 golang.org/x/net v0.0.0-20190813000000-74dc4d7220e7 => golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7 ) diff --git a/go.sum b/go.sum index 9bb528dbf42..48a5d2e3b94 100644 --- a/go.sum +++ b/go.sum @@ -224,6 +224,8 @@ github.com/mholt/certmagic v0.6.2-0.20190624175158-6a42ef9fe8c2 h1:xKE9kZ5C8gelJ github.com/mholt/certmagic v0.6.2-0.20190624175158-6a42ef9fe8c2/go.mod h1:g4cOPxcjV0oFq3qwpjSA30LReKD8AoIfwAY9VvG35NY= github.com/miekg/dns v1.1.16 
h1:iMEQ/IVHxPTtx2Q07JP/k4CKRvSjiAZjZ0hnhgYEDmE= github.com/miekg/dns v1.1.16/go.mod h1:YNV562EiewvSmpCB6/W4c6yqjK7Z+M/aIS1JHsIVeg8= +github.com/miekg/dns v1.1.17 h1:BhJxdA7bH51vKFZSY8Sn9pR7++LREvg0eYFzHA452ew= +github.com/miekg/dns v1.1.17/go.mod h1:WgzbA6oji13JREwiNsRDNfl7jYdPnmz+VEuLrA+/48M= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -329,6 +331,8 @@ golang.org/x/crypto v0.0.0-20190418165655-df01cb2cc480/go.mod h1:WFFai1msRO1wXaE golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 h1:HuIa8hRrWRSrqYzx1qI49NNxhdi2PrY7gxVSq1JjLDc= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190829043050-9756ffdc2472 h1:Gv7RPwsi3eZ2Fgewe3CBsuOebPwO27PoXzRpJPsvSSM= +golang.org/x/crypto v0.0.0-20190829043050-9756ffdc2472/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= @@ -361,6 +365,8 @@ golang.org/x/net v0.0.0-20190813000000-74dc4d7220e7 h1:HOTjhHFecQCpFnEhQ4MAH6Gf7 golang.org/x/net v0.0.0-20190813000000-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7 h1:fHDIZ2oxGnUZRN6WgWFCbYBjH9uqVPRCUVUDhs0wnbA= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297 h1:k7pJ2yAPLPgbskkFdhRCsA77k2fySZ1zf2zCjvQCiIM= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -393,6 +399,8 @@ golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0 h1:HyfiK1WMnHj5FXFXatD+Qs1A/ golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3 h1:4y9KwBHBgBNwDbtu44R5o1fdOCQUEXhbk/P4A9WmJq0= golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd h1:DBH9mDw0zluJT/R+nGuV3jWFWLFaHyYZWD4tOT+cjn0= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= @@ -415,6 +423,7 @@ golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190624190245-7f2218787638/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools 
v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= From 77e8b388775ec828e8f2e69f661948c183c5b2f4 Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 16 Sep 2019 10:12:26 +0000 Subject: [PATCH 193/324] build(deps): bump github.com/aws/aws-sdk-go from 1.23.17 to 1.23.21 (#3282) Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.23.17 to 1.23.21. - [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Changelog](https://github.com/aws/aws-sdk-go/blob/master/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.23.17...v1.23.21) Signed-off-by: dependabot-preview[bot] --- go.mod | 2 +- go.sum | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index a543f13c4ea..4443623f084 100644 --- a/go.mod +++ b/go.mod @@ -10,7 +10,7 @@ require ( github.com/Azure/go-autorest/autorest/to v0.2.0 // indirect github.com/Shopify/sarama v1.21.0 // indirect github.com/apache/thrift v0.12.0 // indirect - github.com/aws/aws-sdk-go v1.23.17 + github.com/aws/aws-sdk-go v1.23.21 github.com/caddyserver/caddy v1.0.3 github.com/coredns/federation v0.0.0-20190818181423-e032b096babe github.com/coreos/go-systemd v0.0.0-20190212144455-93d5ec2c7f76 // indirect diff --git a/go.sum b/go.sum index 48a5d2e3b94..6f679681158 100644 --- a/go.sum +++ b/go.sum @@ -49,6 +49,8 @@ github.com/aws/aws-sdk-go v1.23.13 h1:l/NG+mgQFRGG3dsFzEj0jw9JIs/zYdtU6MXhY1WIDm github.com/aws/aws-sdk-go v1.23.13/go.mod 
h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.23.17 h1:IGNAvtR7ckMEHhy+ObG9xw6DFqEE4Ual0LYXsVTZSLQ= github.com/aws/aws-sdk-go v1.23.17/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.23.21 h1:eVJT2C99cAjZlBY8+CJovf6AwrSANzAcYNuxdCB+SPk= +github.com/aws/aws-sdk-go v1.23.21/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= From 2324439adfeffa94c6b5299f54857b54b2506952 Mon Sep 17 00:00:00 2001 From: Guangming Wang Date: Thu, 19 Sep 2019 14:04:19 +0800 Subject: [PATCH 194/324] ready_test.go: rm t.Fatalf in goroutine (#3284) Signed-off-by: Guangming Wang --- plugin/ready/ready_test.go | 36 ++++++++++++------------------------ 1 file changed, 12 insertions(+), 24 deletions(-) diff --git a/plugin/ready/ready_test.go b/plugin/ready/ready_test.go index 7587bad9bb3..fff19cc9135 100644 --- a/plugin/ready/ready_test.go +++ b/plugin/ready/ready_test.go @@ -4,7 +4,6 @@ import ( "context" "fmt" "net/http" - "sync" "testing" "github.com/coredns/coredns/plugin/erratic" @@ -21,33 +20,22 @@ func TestReady(t *testing.T) { e := &erratic.Erratic{} plugins.Append(e, "erratic") - wg := sync.WaitGroup{} - wg.Add(1) - go func() { - if err := rd.onStartup(); err != nil { - t.Fatalf("Unable to startup the readiness server: %v", err) - } - wg.Done() - }() - wg.Wait() + if err := rd.onStartup(); err != nil { + t.Fatalf("Unable to startup the readiness server: %v", err) + } defer rd.onFinalShutdown() address := fmt.Sprintf("http://%s/ready", rd.ln.Addr().String()) - wg.Add(1) - go func() { - response, err := http.Get(address) - if err != nil { - t.Fatalf("Unable to query %s: %v", 
address, err) - } - if response.StatusCode != 503 { - t.Errorf("Invalid status code: expecting %d, got %d", 503, response.StatusCode) - } - response.Body.Close() - wg.Done() - }() - wg.Wait() + response, err := http.Get(address) + if err != nil { + t.Fatalf("Unable to query %s: %v", address, err) + } + if response.StatusCode != 503 { + t.Errorf("Invalid status code: expecting %d, got %d", 503, response.StatusCode) + } + response.Body.Close() // make it ready by giving erratic 3 queries. m := new(dns.Msg) @@ -56,7 +44,7 @@ func TestReady(t *testing.T) { e.ServeDNS(context.TODO(), &test.ResponseWriter{}, m) e.ServeDNS(context.TODO(), &test.ResponseWriter{}, m) - response, err := http.Get(address) + response, err = http.Get(address) if err != nil { t.Fatalf("Unable to query %s: %v", address, err) } From 31299108ce374ce59ba6a3d7d16fde039ea47bc1 Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Thu, 19 Sep 2019 14:16:36 +0100 Subject: [PATCH 195/324] Run go mod -tidy (#3286) * Run go mod -tidy Tidy up and fix the require of x/net to the hash used in miekg/dns, this seems to fix the "incompatible version bla bla" Signed-off-by: Miek Gieben * Run go mod tidy Signed-off-by: Miek Gieben --- go.mod | 9 +++------ go.sum | 19 ------------------- 2 files changed, 3 insertions(+), 25 deletions(-) diff --git a/go.mod b/go.mod index 4443623f084..e098479aa9f 100644 --- a/go.mod +++ b/go.mod @@ -32,18 +32,15 @@ require ( github.com/opentracing/opentracing-go v1.1.0 github.com/openzipkin-contrib/zipkin-go-opentracing v0.3.5 github.com/philhofer/fwd v1.0.0 // indirect - github.com/pkg/errors v0.8.1 // indirect github.com/prometheus/client_golang v1.1.0 github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 github.com/prometheus/common v0.7.0 - github.com/sirupsen/logrus v1.4.2 // indirect github.com/spf13/pflag v1.0.3 // indirect github.com/tinylib/msgp v1.1.0 // indirect 
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 // indirect go.etcd.io/etcd v0.0.0-20190823073701-67d0c21bb04c - golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 - golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3 - golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 // indirect + golang.org/x/crypto v0.0.0-20190829043050-9756ffdc2472 + golang.org/x/sys v0.0.0-20190904154756-749cb33beabd google.golang.org/api v0.10.0 google.golang.org/genproto v0.0.0-20190701230453-710ae3a149df // indirect google.golang.org/grpc v1.23.0 @@ -60,5 +57,5 @@ require ( replace ( github.com/Azure/go-autorest => github.com/Azure/go-autorest v13.0.0+incompatible github.com/miekg/dns v1.1.3 => github.com/miekg/dns v1.1.17 - golang.org/x/net v0.0.0-20190813000000-74dc4d7220e7 => golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7 + golang.org/x/net v0.0.0-20190813000000-74dc4d7220e7 => golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297 ) diff --git a/go.sum b/go.sum index 6f679681158..3fbefaf6fd5 100644 --- a/go.sum +++ b/go.sum @@ -45,10 +45,6 @@ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRF github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/apache/thrift v0.12.0 h1:pODnxUFNcjP9UTLZGTdeh+j16A8lJbRvD3rOtrk/7bs= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= -github.com/aws/aws-sdk-go v1.23.13 h1:l/NG+mgQFRGG3dsFzEj0jw9JIs/zYdtU6MXhY1WIDmM= -github.com/aws/aws-sdk-go v1.23.13/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.23.17 h1:IGNAvtR7ckMEHhy+ObG9xw6DFqEE4Ual0LYXsVTZSLQ= -github.com/aws/aws-sdk-go v1.23.17/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.23.21 h1:eVJT2C99cAjZlBY8+CJovf6AwrSANzAcYNuxdCB+SPk= 
github.com/aws/aws-sdk-go v1.23.21/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= @@ -224,8 +220,6 @@ github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0j github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mholt/certmagic v0.6.2-0.20190624175158-6a42ef9fe8c2 h1:xKE9kZ5C8gelJC3+BNM6LJs1x21rivK7yxfTZMAuY2s= github.com/mholt/certmagic v0.6.2-0.20190624175158-6a42ef9fe8c2/go.mod h1:g4cOPxcjV0oFq3qwpjSA30LReKD8AoIfwAY9VvG35NY= -github.com/miekg/dns v1.1.16 h1:iMEQ/IVHxPTtx2Q07JP/k4CKRvSjiAZjZ0hnhgYEDmE= -github.com/miekg/dns v1.1.16/go.mod h1:YNV562EiewvSmpCB6/W4c6yqjK7Z+M/aIS1JHsIVeg8= github.com/miekg/dns v1.1.17 h1:BhJxdA7bH51vKFZSY8Sn9pR7++LREvg0eYFzHA452ew= github.com/miekg/dns v1.1.17/go.mod h1:WgzbA6oji13JREwiNsRDNfl7jYdPnmz+VEuLrA+/48M= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= @@ -323,7 +317,6 @@ go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/ go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181001203147-e3636079e1a4/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181025213731-e84da0312774/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190123085648-057139ce5d2b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -331,8 +324,6 @@ golang.org/x/crypto 
v0.0.0-20190228161510-8dd112bcdc25/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190418165655-df01cb2cc480/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 h1:HuIa8hRrWRSrqYzx1qI49NNxhdi2PrY7gxVSq1JjLDc= -golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190829043050-9756ffdc2472 h1:Gv7RPwsi3eZ2Fgewe3CBsuOebPwO27PoXzRpJPsvSSM= golang.org/x/crypto v0.0.0-20190829043050-9756ffdc2472/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -347,7 +338,6 @@ golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180926154720-4dfa2610cdf3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -363,10 +353,6 @@ golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 
golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190813000000-74dc4d7220e7 h1:HOTjhHFecQCpFnEhQ4MAH6Gf7yGklZnqaHvtezTKqnQ= -golang.org/x/net v0.0.0-20190813000000-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7 h1:fHDIZ2oxGnUZRN6WgWFCbYBjH9uqVPRCUVUDhs0wnbA= -golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297 h1:k7pJ2yAPLPgbskkFdhRCsA77k2fySZ1zf2zCjvQCiIM= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -383,7 +369,6 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180928133829-e4b3c5e90611/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190124100055-b90733256f2e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -430,8 +415,6 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+y golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.9.0 h1:jbyannxz0XFD3zdjgrSUsaJbgpH4eTrkdhRChkHPfO8= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.10.0 h1:7tmAxx3oKE98VMZ+SBZzvYYWRQ9HODBxmC8mXUsraSQ= google.golang.org/api v0.10.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= @@ -453,8 +436,6 @@ google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiq google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0 h1:AzbTB6ux+okLTzP8Ru1Xs41C303zdcfEht7MQnYJt5A= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -gopkg.in/DataDog/dd-trace-go.v1 v1.16.1 h1:Dngw1zun6yTYFHNdzEWBlrJzFA2QJMjSA2sZ4nH2UWo= -gopkg.in/DataDog/dd-trace-go.v1 v1.16.1/go.mod h1:DVp8HmDh8PuTu2Z0fVVlBsyWaC++fzwVCaGWylTe3tg= gopkg.in/DataDog/dd-trace-go.v1 v1.17.0 h1:j9vAp9Re9bbtA/QFehkJpNba/6W2IbJtNuXZophCa54= gopkg.in/DataDog/dd-trace-go.v1 v1.17.0/go.mod h1:DVp8HmDh8PuTu2Z0fVVlBsyWaC++fzwVCaGWylTe3tg= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= From 62317c3c14e306e337f7aa0507a2245495c7aed2 Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Thu, 19 Sep 2019 14:17:53 +0100 Subject: [PATCH 196/324] update doc to not use the root zone for everything (#3288) Update all documentation in the tree to use example.org as an example configuration (in so far possible). As to get out of the just use "." and fallthrough and things would be fine. 
Signed-off-by: Miek Gieben --- README.md | 20 +++++++++++++------- plugin/auto/README.md | 4 ++-- plugin/cache/README.md | 14 +++++++------- plugin/cancel/README.md | 4 ++-- plugin/clouddns/README.md | 4 ++-- plugin/debug/README.md | 2 +- plugin/erratic/README.md | 13 ++++++------- plugin/errors/README.md | 4 ++-- plugin/etcd/README.md | 20 ++++++++++++++------ plugin/hosts/README.md | 5 +++-- plugin/log/README.md | 2 +- plugin/nsid/README.md | 2 +- plugin/route53/README.md | 11 +++++++---- plugin/whoami/README.md | 2 +- 14 files changed, 62 insertions(+), 45 deletions(-) diff --git a/README.md b/README.md index d2508c830f8..72f2ef2c944 100644 --- a/README.md +++ b/README.md @@ -82,8 +82,11 @@ When starting CoreDNS without any configuration, it loads the ~~~ txt .:53 -2016/09/18 09:20:50 [INFO] CoreDNS-001 -CoreDNS-001 + ______ ____ _ _______ + / ____/___ ________ / __ \/ | / / ___/ ~ CoreDNS-1.6.3 + / / / __ \/ ___/ _ \/ / / / |/ /\__ \ ~ linux/amd64, go1.13, +/ /___/ /_/ / / / __/ /_/ / /| /___/ / +\____/\____/_/ \___/_____/_/ |_//____/ ~~~ Any query sent to port 53 should return some information; your sending address, port and protocol @@ -128,17 +131,20 @@ Serve `example.org` on port 1053, but forward everything that does *not* match ` recursive nameserver *and* rewrite ANY queries to HINFO. ~~~ txt -.:1053 { - rewrite ANY HINFO - forward . 8.8.8.8:53 - - file /var/lib/coredns/example.org.signed example.org { +example.org:1053 { + file /var/lib/coredns/example.org.signed { transfer to * transfer to 2001:500:8f::53 } errors log } +. { + any + forward . 8.8.8.8:53 + errors + log +} ~~~ IP addresses are also allowed. They are automatically converted to reverse zones: diff --git a/plugin/auto/README.md b/plugin/auto/README.md index e313c966dc9..26c232c4d17 100644 --- a/plugin/auto/README.md +++ b/plugin/auto/README.md @@ -56,8 +56,8 @@ Load `org` domains from `/etc/coredns/zones/org` and allow transfers to the inte notifies to 10.240.1.1 ~~~ corefile -. 
{ - auto org { +org { + auto { directory /etc/coredns/zones/org transfer to * transfer to 10.240.1.1 diff --git a/plugin/cache/README.md b/plugin/cache/README.md index 8b2bdf075ca..9f97ba437af 100644 --- a/plugin/cache/README.md +++ b/plugin/cache/README.md @@ -93,13 +93,13 @@ Proxy to Google Public DNS and only cache responses for example.org (or below). } ~~~ -Enable caching for all zones, keep a positive cache size of 5000 and a negative cache size of 2500: +Enable caching for `example.org`, keep a positive cache size of 5000 and a negative cache size of 2500: ~~~ corefile - . { - cache { - success 5000 - denial 2500 +example.org { + cache { + success 5000 + denial 2500 } - } - ~~~ +} +~~~ diff --git a/plugin/cancel/README.md b/plugin/cancel/README.md index 9f7c470d940..8561b1f6b86 100644 --- a/plugin/cancel/README.md +++ b/plugin/cancel/README.md @@ -25,7 +25,7 @@ cancel [TIMEOUT] ## Examples ~~~ corefile -. { +example.org { cancel whoami } @@ -34,7 +34,7 @@ cancel [TIMEOUT] Or with a custom timeout: ~~~ corefile -. { +example.org { cancel 1s whoami } diff --git a/plugin/clouddns/README.md b/plugin/clouddns/README.md index dcb9aafa8ee..18d95cb31a3 100644 --- a/plugin/clouddns/README.md +++ b/plugin/clouddns/README.md @@ -49,7 +49,7 @@ clouddns [ZONE:PROJECT_ID:HOSTED_ZONE_NAME...] { Enable clouddns with implicit GCP credentials and resolve CNAMEs via 10.0.0.1: ~~~ txt -. { +example.org { clouddns example.org.:gcp-example-project:example-zone forward . 10.0.0.1 } @@ -58,7 +58,7 @@ Enable clouddns with implicit GCP credentials and resolve CNAMEs via 10.0.0.1: Enable clouddns with fallthrough: ~~~ txt -. { +example.org { clouddns example.org.:gcp-example-project:example-zone clouddns example.com.:gcp-example-project:example-zone-2 { fallthrough example.gov. 
} diff --git a/plugin/debug/README.md b/plugin/debug/README.md index fd769843d8b..a6234866d6a 100644 --- a/plugin/debug/README.md +++ b/plugin/debug/README.md @@ -45,4 +45,4 @@ Disable the ability to recover from crashes and show debug logging: ## Also See -https://www.wireshark.org/docs/man-pages/text2pcap.html. +. diff --git a/plugin/erratic/README.md b/plugin/erratic/README.md index e3bd79ae80a..dcd74097cc0 100644 --- a/plugin/erratic/README.md +++ b/plugin/erratic/README.md @@ -40,7 +40,7 @@ This plugin reports readiness to the ready plugin. ## Examples ~~~ corefile -. { +example.org { erratic { drop 3 } @@ -50,7 +50,7 @@ This plugin reports readiness to the ready plugin. Or even shorter if the defaults suits you. Note this only drops queries, it does not delay them. ~~~ corefile -. { +example.org { erratic } ~~~ @@ -58,7 +58,7 @@ Or even shorter if the defaults suits you. Note this only drops queries, it does Delay 1 in 3 queries for 50ms ~~~ corefile -. { +example.org { erratic { delay 3 50ms } @@ -68,7 +68,7 @@ Delay 1 in 3 queries for 50ms Delay 1 in 3 and truncate 1 in 5. ~~~ corefile -. { +example.org { erratic { delay 3 5ms truncate 5 @@ -79,7 +79,7 @@ Delay 1 in 3 and truncate 1 in 5. Drop every second query. ~~~ corefile -. { +example.org { erratic { drop 2 truncate 2 @@ -89,5 +89,4 @@ Drop every second query. ## Also See -[RFC 3849](https://tools.ietf.org/html/rfc3849) and -[RFC 5737](https://tools.ietf.org/html/rfc5737). +[RFC 3849](https://tools.ietf.org/html/rfc3849) and [RFC 5737](https://tools.ietf.org/html/rfc5737). diff --git a/plugin/errors/README.md b/plugin/errors/README.md index 5feef2694a9..61ed582cb5a 100644 --- a/plugin/errors/README.md +++ b/plugin/errors/README.md @@ -38,10 +38,10 @@ For better performance, it's recommended to use the `^` or `$` metacharacters in ## Examples -Use the *whoami* to respond to queries and Log errors to standard output. 
+Use the *whoami* to respond to queries in the example.org domain and Log errors to standard output. ~~~ corefile -. { +example.org { whoami errors } diff --git a/plugin/etcd/README.md b/plugin/etcd/README.md index 3b909fcac3d..9b7f75f2957 100644 --- a/plugin/etcd/README.md +++ b/plugin/etcd/README.md @@ -77,15 +77,19 @@ This causes two lookups from CoreDNS to etcdv3 in certain cases. This is the default SkyDNS setup, with everything specified in full: ~~~ corefile -. { - etcd skydns.local { +skydns.local { + etcd { path /skydns endpoint http://localhost:2379 } prometheus - cache 160 skydns.local + cache loadbalance +} + +. { forward . 8.8.8.8:53 8.8.4.4:53 + cache } ~~~ @@ -93,12 +97,16 @@ Or a setup where we use `/etc/resolv.conf` as the basis for the proxy and the up when resolving external pointing CNAMEs. ~~~ corefile -. { - etcd skydns.local { +skydns.local { + etcd { path /skydns } - cache 160 skydns.local + cache +} + +. { forward . /etc/resolv.conf + cache } ~~~ diff --git a/plugin/hosts/README.md b/plugin/hosts/README.md index b18438cc6b6..044ce367104 100644 --- a/plugin/hosts/README.md +++ b/plugin/hosts/README.md @@ -104,11 +104,12 @@ next plugin if query doesn't match. Load hosts file inlined in Corefile. ~~~ -. { - hosts example.hosts example.org { +example.hosts example.org { + hosts { 10.0.0.1 example.org fallthrough } + whoami } ~~~ diff --git a/plugin/log/README.md b/plugin/log/README.md index bbd853bd31f..8b397bffa24 100644 --- a/plugin/log/README.md +++ b/plugin/log/README.md @@ -92,7 +92,7 @@ The default Common Log Format is: Each of these logs will be outputted with `log.Infof`, so a typical example looks like this: ~~~ txt -2018-10-30T19:10:07.547Z [INFO] [::1]:50759 - 29008 "A IN example.org. udp 41 false 4096" NOERROR qr,rd,ra,ad 68 0.037990251s +[INFO] [::1]:50759 - 29008 "A IN example.org. 
udp 41 false 4096" NOERROR qr,rd,ra,ad 68 0.037990251s ~~~~ ## Examples diff --git a/plugin/nsid/README.md b/plugin/nsid/README.md index 0ff5cd764ee..c50acbf59b5 100644 --- a/plugin/nsid/README.md +++ b/plugin/nsid/README.md @@ -28,7 +28,7 @@ If **DATA** is not given, the host's name is used. Enable nsid: ~~~ corefile -. { +example.org { whoami nsid Use The Force } diff --git a/plugin/route53/README.md b/plugin/route53/README.md index e704ced602d..c0e7dd2dfd2 100644 --- a/plugin/route53/README.md +++ b/plugin/route53/README.md @@ -62,8 +62,11 @@ route53 [ZONE:HOSTED_ZONE_ID...] { Enable route53 with implicit AWS credentials and resolve CNAMEs via 10.0.0.1: ~~~ txt -. { +example.org { route53 example.org.:Z1Z2Z3Z4DZ5Z6Z7 +} + +. { forward . 10.0.0.1 } ~~~ @@ -71,7 +74,7 @@ Enable route53 with implicit AWS credentials and resolve CNAMEs via 10.0.0.1: Enable route53 with explicit AWS credentials: ~~~ txt -. { +example.org { route53 example.org.:Z1Z2Z3Z4DZ5Z6Z7 { aws_access_key AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY } @@ -91,14 +94,14 @@ Enable route53 with fallthrough: Enable route53 with multiple hosted zones with the same domain: ~~~ txt -. { +example.org { route53 example.org.:Z1Z2Z3Z4DZ5Z6Z7 example.org.:Z93A52145678156 } ~~~ Enable route53 and refresh records every 3 minutes ~~~ txt -. { +example.org { route53 example.org.:Z1Z2Z3Z4DZ5Z6Z7 { refresh 3m } diff --git a/plugin/whoami/README.md b/plugin/whoami/README.md index 7417132cc1d..55d0388d737 100644 --- a/plugin/whoami/README.md +++ b/plugin/whoami/README.md @@ -34,7 +34,7 @@ whoami Start a server on the default port and load the *whoami* plugin. ~~~ corefile -. 
{ +example.org { whoami } ~~~ From 85e65702bd5b02c0ced5be51e02860c5ae9321aa Mon Sep 17 00:00:00 2001 From: yeya24 Date: Thu, 19 Sep 2019 11:38:15 -0400 Subject: [PATCH 197/324] add host metrics (#3277) * add host metrics Signed-off-by: yeya24 * update hosts readme docs Signed-off-by: yeya24 --- plugin/hosts/README.md | 7 +++++++ plugin/hosts/hostsfile.go | 20 ++++++++++++++++++++ plugin/hosts/setup.go | 7 +++++++ plugin/ready/ready.go | 2 +- 4 files changed, 35 insertions(+), 1 deletion(-) diff --git a/plugin/hosts/README.md b/plugin/hosts/README.md index 044ce367104..270a8200bdc 100644 --- a/plugin/hosts/README.md +++ b/plugin/hosts/README.md @@ -72,6 +72,13 @@ hosts [FILE [ZONES...]] { is authoritative. If specific zones are listed (for example `in-addr.arpa` and `ip6.arpa`), then only queries for those zones will be subject to fallthrough. +## Metrics + +If monitoring is enabled (via the *prometheus* directive) then the following metrics are exported: + +- `coredns_hosts_entries_count{}` - The combined number of entries in hosts and Corefile. +- `coredns_hosts_reload_timestamp_seconds{}` - The timestamp of the last reload of hosts file. + ## Examples Load `/etc/hosts` file. diff --git a/plugin/hosts/hostsfile.go b/plugin/hosts/hostsfile.go index f7cc73528ba..ed9763de17d 100644 --- a/plugin/hosts/hostsfile.go +++ b/plugin/hosts/hostsfile.go @@ -17,6 +17,8 @@ import ( "time" "github.com/coredns/coredns/plugin" + + "github.com/prometheus/client_golang/prometheus" ) // parseIP calls discards any v6 zone info, before calling net.ParseIP. 
@@ -135,6 +137,8 @@ func (h *Hostsfile) readHosts() { h.mtime = stat.ModTime() h.size = stat.Size() + hostsEntries.WithLabelValues().Set(float64(h.inline.Len() + h.hmap.Len())) + hostsReloadTime.Set(float64(stat.ModTime().UnixNano()) / 1e9) h.Unlock() } @@ -252,3 +256,19 @@ func (h *Hostsfile) LookupStaticAddr(addr string) []string { copy(hostsCp[len(hosts1):], hosts2) return hostsCp } + +var ( + hostsEntries = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: plugin.Namespace, + Subsystem: "hosts", + Name: "entries_count", + Help: "The combined number of entries in hosts and Corefile.", + }, []string{}) + + hostsReloadTime = prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: plugin.Namespace, + Subsystem: "hosts", + Name: "reload_timestamp_seconds", + Help: "The timestamp of the last reload of hosts file.", + }) +) diff --git a/plugin/hosts/setup.go b/plugin/hosts/setup.go index 981ea141f36..be95a547fb5 100644 --- a/plugin/hosts/setup.go +++ b/plugin/hosts/setup.go @@ -9,6 +9,7 @@ import ( "github.com/coredns/coredns/core/dnsserver" "github.com/coredns/coredns/plugin" + "github.com/coredns/coredns/plugin/metrics" clog "github.com/coredns/coredns/plugin/pkg/log" "github.com/caddyserver/caddy" @@ -57,6 +58,12 @@ func setup(c *caddy.Controller) error { return nil }) + c.OnStartup(func() error { + metrics.MustRegister(c, hostsEntries) + metrics.MustRegister(c, hostsReloadTime) + return nil + }) + c.OnShutdown(func() error { close(parseChan) return nil diff --git a/plugin/ready/ready.go b/plugin/ready/ready.go index 0b08c22f4ac..f40682041df 100644 --- a/plugin/ready/ready.go +++ b/plugin/ready/ready.go @@ -45,7 +45,7 @@ func (rd *ready) onStartup() error { ok, todo := plugins.Ready() if ok { w.WriteHeader(http.StatusOK) - io.WriteString(w, http.StatusText(http.StatusOK)) + io.WriteString(w, http.StatusText(http.StatusOK)) return } log.Infof("Still waiting on: %q", todo) From 004c5fca9d7dfb8b58d608e4d50833b40953441f 
Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Fri, 20 Sep 2019 08:02:30 +0100 Subject: [PATCH 198/324] all: simply registering plugins (#3287) Abstract the caddy call and make it simpler. See #3261 for some part of the discussion. Go from: ~~~ go func init() { caddy.RegisterPlugin("any", caddy.Plugin{ ServerType: "dns", Action: setup, }) } ~~~ To: ~~~ go func init() { plugin.Register("any", setup) } ~~~ This requires some external documents in coredns.io to be updated as well; the old way still works, so it's backwards compatible. Signed-off-by: Miek Gieben --- plugin/acl/setup.go | 7 +------ plugin/any/setup.go | 7 +------ plugin/auto/setup.go | 7 +------ plugin/autopath/setup.go | 8 +------- plugin/azure/setup.go | 7 +------ plugin/cache/setup.go | 7 +------ plugin/chaos/setup.go | 8 +------- plugin/clouddns/setup.go | 7 +++---- plugin/deprecated/setup.go | 9 +++------ plugin/dnssec/setup.go | 7 +------ plugin/dnstap/setup.go | 7 +------ plugin/erratic/setup.go | 7 +------ plugin/errors/setup.go | 7 +------ plugin/etcd/setup.go | 7 +------ plugin/file/setup.go | 7 +------ plugin/forward/setup.go | 7 +------ plugin/grpc/setup.go | 7 +------ plugin/health/setup.go | 7 +------ plugin/hosts/setup.go | 7 +------ plugin/k8s_external/setup.go | 7 +------ plugin/kubernetes/setup.go | 7 +------ plugin/loadbalance/setup.go | 7 +------ plugin/log/setup.go | 7 +------ plugin/loop/setup.go | 7 +------ plugin/metadata/setup.go | 7 +------ plugin/metrics/setup.go | 7 +------ plugin/nsid/setup.go | 7 +------ plugin/pprof/setup.go | 7 +------ plugin/ready/setup.go | 7 +------ plugin/register.go | 11 +++++++++++ plugin/reload/setup.go | 7 +------ plugin/rewrite/setup.go | 7 +------ plugin/route53/setup.go | 7 +++---- plugin/secondary/setup.go | 7 +------ plugin/sign/setup.go | 7 +------ plugin/template/setup.go | 7 +------ plugin/trace/setup.go | 7 +------ plugin/whoami/setup.go | 7 +------ 38 files changed, 54 insertions(+), 220 deletions(-) create mode 100644 
plugin/register.go diff --git a/plugin/acl/setup.go b/plugin/acl/setup.go index 1179175dd1f..74973771408 100644 --- a/plugin/acl/setup.go +++ b/plugin/acl/setup.go @@ -13,12 +13,7 @@ import ( "github.com/miekg/dns" ) -func init() { - caddy.RegisterPlugin("acl", caddy.Plugin{ - ServerType: "dns", - Action: setup, - }) -} +func init() { plugin.Register("acl", setup) } func newDefaultFilter() *iptree.Tree { defaultFilter := iptree.NewTree() diff --git a/plugin/any/setup.go b/plugin/any/setup.go index f2d709e268b..703b8e5d2c5 100644 --- a/plugin/any/setup.go +++ b/plugin/any/setup.go @@ -7,12 +7,7 @@ import ( "github.com/caddyserver/caddy" ) -func init() { - caddy.RegisterPlugin("any", caddy.Plugin{ - ServerType: "dns", - Action: setup, - }) -} +func init() { plugin.Register("any", setup) } func setup(c *caddy.Controller) error { a := Any{} diff --git a/plugin/auto/setup.go b/plugin/auto/setup.go index f62aa71683a..e6495b192bd 100644 --- a/plugin/auto/setup.go +++ b/plugin/auto/setup.go @@ -18,12 +18,7 @@ import ( var log = clog.NewWithPlugin("auto") -func init() { - caddy.RegisterPlugin("auto", caddy.Plugin{ - ServerType: "dns", - Action: setup, - }) -} +func init() { plugin.Register("auto", setup) } func setup(c *caddy.Controller) error { a, err := autoParse(c) diff --git a/plugin/autopath/setup.go b/plugin/autopath/setup.go index 2f69a3eb2c7..94dde59533a 100644 --- a/plugin/autopath/setup.go +++ b/plugin/autopath/setup.go @@ -11,13 +11,7 @@ import ( "github.com/miekg/dns" ) -func init() { - caddy.RegisterPlugin("autopath", caddy.Plugin{ - ServerType: "dns", - Action: setup, - }) - -} +func init() { plugin.Register("autopath", setup) } func setup(c *caddy.Controller) error { ap, mw, err := autoPathParse(c) diff --git a/plugin/azure/setup.go b/plugin/azure/setup.go index ef5711c00e8..15ebb7d6fc3 100644 --- a/plugin/azure/setup.go +++ b/plugin/azure/setup.go @@ -17,12 +17,7 @@ import ( var log = clog.NewWithPlugin("azure") -func init() { - 
caddy.RegisterPlugin("azure", caddy.Plugin{ - ServerType: "dns", - Action: setup, - }) -} +func init() { plugin.Register("azure", setup) } func setup(c *caddy.Controller) error { env, keys, fall, err := parse(c) diff --git a/plugin/cache/setup.go b/plugin/cache/setup.go index 3bffe8581f4..e4ca494dbfb 100644 --- a/plugin/cache/setup.go +++ b/plugin/cache/setup.go @@ -16,12 +16,7 @@ import ( var log = clog.NewWithPlugin("cache") -func init() { - caddy.RegisterPlugin("cache", caddy.Plugin{ - ServerType: "dns", - Action: setup, - }) -} +func init() { plugin.Register("cache", setup) } func setup(c *caddy.Controller) error { ca, err := cacheParse(c) diff --git a/plugin/chaos/setup.go b/plugin/chaos/setup.go index 33eba8a5002..42ce76b4f3e 100644 --- a/plugin/chaos/setup.go +++ b/plugin/chaos/setup.go @@ -11,13 +11,7 @@ import ( "github.com/caddyserver/caddy" ) -func init() { - caddy.RegisterPlugin("chaos", caddy.Plugin{ - ServerType: "dns", - Action: setup, - }) - -} +func init() { plugin.Register("chaos", setup) } func setup(c *caddy.Controller) error { version, authors, err := parse(c) diff --git a/plugin/clouddns/setup.go b/plugin/clouddns/setup.go index 732c240f77f..35c7e9179e1 100644 --- a/plugin/clouddns/setup.go +++ b/plugin/clouddns/setup.go @@ -18,9 +18,8 @@ import ( var log = clog.NewWithPlugin("clouddns") func init() { - caddy.RegisterPlugin("clouddns", caddy.Plugin{ - ServerType: "dns", - Action: func(c *caddy.Controller) error { + plugin.Register("clouddns", + func(c *caddy.Controller) error { f := func(ctx context.Context, opt option.ClientOption) (gcpDNS, error) { var err error var client *gcp.Service @@ -35,7 +34,7 @@ func init() { } return setup(c, f) }, - }) + ) } func setup(c *caddy.Controller, f func(ctx context.Context, opt option.ClientOption) (gcpDNS, error)) error { diff --git a/plugin/deprecated/setup.go b/plugin/deprecated/setup.go index 36c13bdb639..782f36f042e 100644 --- a/plugin/deprecated/setup.go +++ b/plugin/deprecated/setup.go @@ 
-20,7 +20,7 @@ import ( ) // removed has the names of the plugins that need to error on startup. -var removed = []string{"reverse"} +var removed = []string{""} func setup(c *caddy.Controller) error { c.Next() @@ -29,10 +29,7 @@ func setup(c *caddy.Controller) error { } func init() { - for _, plugin := range removed { - caddy.RegisterPlugin(plugin, caddy.Plugin{ - ServerType: "dns", - Action: setup, - }) + for _, plug := range removed { + plugin.Register(plug, setup) } } diff --git a/plugin/dnssec/setup.go b/plugin/dnssec/setup.go index 54a2390fd19..f410fb8b5a5 100644 --- a/plugin/dnssec/setup.go +++ b/plugin/dnssec/setup.go @@ -17,12 +17,7 @@ import ( var log = clog.NewWithPlugin("dnssec") -func init() { - caddy.RegisterPlugin("dnssec", caddy.Plugin{ - ServerType: "dns", - Action: setup, - }) -} +func init() { plugin.Register("dnssec", setup) } func setup(c *caddy.Controller) error { zones, keys, capacity, splitkeys, err := dnssecParse(c) diff --git a/plugin/dnstap/setup.go b/plugin/dnstap/setup.go index c9595ec73cc..5baec5c6e92 100644 --- a/plugin/dnstap/setup.go +++ b/plugin/dnstap/setup.go @@ -15,12 +15,7 @@ import ( var log = clog.NewWithPlugin("dnstap") -func init() { - caddy.RegisterPlugin("dnstap", caddy.Plugin{ - ServerType: "dns", - Action: wrapSetup, - }) -} +func init() { plugin.Register("dnstap", wrapSetup) } func wrapSetup(c *caddy.Controller) error { if err := setup(c); err != nil { diff --git a/plugin/erratic/setup.go b/plugin/erratic/setup.go index 98ac4f0c2ab..1cc6e048df7 100644 --- a/plugin/erratic/setup.go +++ b/plugin/erratic/setup.go @@ -11,12 +11,7 @@ import ( "github.com/caddyserver/caddy" ) -func init() { - caddy.RegisterPlugin("erratic", caddy.Plugin{ - ServerType: "dns", - Action: setup, - }) -} +func init() { plugin.Register("erratic", setup) } func setup(c *caddy.Controller) error { e, err := parseErratic(c) diff --git a/plugin/errors/setup.go b/plugin/errors/setup.go index a196d220f7c..283f3dd1508 100644 --- 
a/plugin/errors/setup.go +++ b/plugin/errors/setup.go @@ -10,12 +10,7 @@ import ( "github.com/caddyserver/caddy" ) -func init() { - caddy.RegisterPlugin("errors", caddy.Plugin{ - ServerType: "dns", - Action: setup, - }) -} +func init() { plugin.Register("errors", setup) } func setup(c *caddy.Controller) error { handler, err := errorsParse(c) diff --git a/plugin/etcd/setup.go b/plugin/etcd/setup.go index d7827c875d7..c2916de517f 100644 --- a/plugin/etcd/setup.go +++ b/plugin/etcd/setup.go @@ -15,12 +15,7 @@ import ( var log = clog.NewWithPlugin("etcd") -func init() { - caddy.RegisterPlugin("etcd", caddy.Plugin{ - ServerType: "dns", - Action: setup, - }) -} +func init() { plugin.Register("etcd", setup) } func setup(c *caddy.Controller) error { e, err := etcdParse(c) diff --git a/plugin/file/setup.go b/plugin/file/setup.go index ce78e815e1c..44ecf2ca1e9 100644 --- a/plugin/file/setup.go +++ b/plugin/file/setup.go @@ -13,12 +13,7 @@ import ( "github.com/caddyserver/caddy" ) -func init() { - caddy.RegisterPlugin("file", caddy.Plugin{ - ServerType: "dns", - Action: setup, - }) -} +func init() { plugin.Register("file", setup) } func setup(c *caddy.Controller) error { zones, err := fileParse(c) diff --git a/plugin/forward/setup.go b/plugin/forward/setup.go index d084d61180b..60ec08fa3ac 100644 --- a/plugin/forward/setup.go +++ b/plugin/forward/setup.go @@ -16,12 +16,7 @@ import ( "github.com/caddyserver/caddy/caddyfile" ) -func init() { - caddy.RegisterPlugin("forward", caddy.Plugin{ - ServerType: "dns", - Action: setup, - }) -} +func init() { plugin.Register("forward", setup) } func setup(c *caddy.Controller) error { f, err := parseForward(c) diff --git a/plugin/grpc/setup.go b/plugin/grpc/setup.go index aecef9d8359..5c0c9a4521b 100644 --- a/plugin/grpc/setup.go +++ b/plugin/grpc/setup.go @@ -14,12 +14,7 @@ import ( "github.com/caddyserver/caddy/caddyfile" ) -func init() { - caddy.RegisterPlugin("grpc", caddy.Plugin{ - ServerType: "dns", - 
Action: setup, - }) -} +func init() { plugin.Register("grpc", setup) } func setup(c *caddy.Controller) error { g, err := parseGRPC(c) diff --git a/plugin/health/setup.go b/plugin/health/setup.go index 75e35d8e603..1f72cb245dc 100644 --- a/plugin/health/setup.go +++ b/plugin/health/setup.go @@ -11,12 +11,7 @@ import ( "github.com/caddyserver/caddy" ) -func init() { - caddy.RegisterPlugin("health", caddy.Plugin{ - ServerType: "dns", - Action: setup, - }) -} +func init() { plugin.Register("health", setup) } func setup(c *caddy.Controller) error { addr, lame, err := parse(c) diff --git a/plugin/hosts/setup.go b/plugin/hosts/setup.go index be95a547fb5..09f030842d0 100644 --- a/plugin/hosts/setup.go +++ b/plugin/hosts/setup.go @@ -17,12 +17,7 @@ import ( var log = clog.NewWithPlugin("hosts") -func init() { - caddy.RegisterPlugin("hosts", caddy.Plugin{ - ServerType: "dns", - Action: setup, - }) -} +func init() { plugin.Register("hosts", setup) } func periodicHostsUpdate(h *Hosts) chan bool { parseChan := make(chan bool) diff --git a/plugin/k8s_external/setup.go b/plugin/k8s_external/setup.go index ed645c0c859..5c2dce0b2e2 100644 --- a/plugin/k8s_external/setup.go +++ b/plugin/k8s_external/setup.go @@ -9,12 +9,7 @@ import ( "github.com/caddyserver/caddy" ) -func init() { - caddy.RegisterPlugin("k8s_external", caddy.Plugin{ - ServerType: "dns", - Action: setup, - }) -} +func init() { plugin.Register("k8s_external", setup) } func setup(c *caddy.Controller) error { e, err := parse(c) diff --git a/plugin/kubernetes/setup.go b/plugin/kubernetes/setup.go index 1309139d738..63d85fee967 100644 --- a/plugin/kubernetes/setup.go +++ b/plugin/kubernetes/setup.go @@ -34,12 +34,7 @@ import ( var log = clog.NewWithPlugin("kubernetes") -func init() { - caddy.RegisterPlugin("kubernetes", caddy.Plugin{ - ServerType: "dns", - Action: setup, - }) -} +func init() { plugin.Register("kubernetes", setup) } func setup(c *caddy.Controller) error { klog.SetOutput(os.Stdout) diff 
--git a/plugin/loadbalance/setup.go b/plugin/loadbalance/setup.go index c31fc15e161..4c5a6bfd8f8 100644 --- a/plugin/loadbalance/setup.go +++ b/plugin/loadbalance/setup.go @@ -12,12 +12,7 @@ import ( var log = clog.NewWithPlugin("loadbalance") -func init() { - caddy.RegisterPlugin("loadbalance", caddy.Plugin{ - ServerType: "dns", - Action: setup, - }) -} +func init() { plugin.Register("loadbalance", setup) } func setup(c *caddy.Controller) error { err := parse(c) diff --git a/plugin/log/setup.go b/plugin/log/setup.go index 2e89d1f8108..f9441bf9259 100644 --- a/plugin/log/setup.go +++ b/plugin/log/setup.go @@ -12,12 +12,7 @@ import ( "github.com/miekg/dns" ) -func init() { - caddy.RegisterPlugin("log", caddy.Plugin{ - ServerType: "dns", - Action: setup, - }) -} +func init() { plugin.Register("log", setup) } func setup(c *caddy.Controller) error { rules, err := logParse(c) diff --git a/plugin/loop/setup.go b/plugin/loop/setup.go index 30f1a25279f..3cf4861ac76 100644 --- a/plugin/loop/setup.go +++ b/plugin/loop/setup.go @@ -13,12 +13,7 @@ import ( "github.com/caddyserver/caddy" ) -func init() { - caddy.RegisterPlugin("loop", caddy.Plugin{ - ServerType: "dns", - Action: setup, - }) -} +func init() { plugin.Register("loop", setup) } func setup(c *caddy.Controller) error { l, err := parse(c) diff --git a/plugin/metadata/setup.go b/plugin/metadata/setup.go index 665216ea8f6..734ea678ee2 100644 --- a/plugin/metadata/setup.go +++ b/plugin/metadata/setup.go @@ -7,12 +7,7 @@ import ( "github.com/caddyserver/caddy" ) -func init() { - caddy.RegisterPlugin("metadata", caddy.Plugin{ - ServerType: "dns", - Action: setup, - }) -} +func init() { plugin.Register("metadata", setup) } func setup(c *caddy.Controller) error { m, err := metadataParse(c) diff --git a/plugin/metrics/setup.go b/plugin/metrics/setup.go index 744db7cd3b2..719362609a5 100644 --- a/plugin/metrics/setup.go +++ b/plugin/metrics/setup.go @@ -20,12 +20,7 @@ var ( registry = newReg() ) -func 
init() { - caddy.RegisterPlugin("prometheus", caddy.Plugin{ - ServerType: "dns", - Action: setup, - }) -} +func init() { plugin.Register("prometheus", setup) } func setup(c *caddy.Controller) error { m, err := parse(c) diff --git a/plugin/nsid/setup.go b/plugin/nsid/setup.go index c851c7d20f1..3fa0edd8547 100644 --- a/plugin/nsid/setup.go +++ b/plugin/nsid/setup.go @@ -10,12 +10,7 @@ import ( "github.com/caddyserver/caddy" ) -func init() { - caddy.RegisterPlugin("nsid", caddy.Plugin{ - ServerType: "dns", - Action: setup, - }) -} +func init() { plugin.Register("nsid", setup) } func setup(c *caddy.Controller) error { nsid, err := nsidParse(c) diff --git a/plugin/pprof/setup.go b/plugin/pprof/setup.go index 175a160113d..8de3b2ac0bf 100644 --- a/plugin/pprof/setup.go +++ b/plugin/pprof/setup.go @@ -15,12 +15,7 @@ var log = clog.NewWithPlugin("pprof") const defaultAddr = "localhost:6053" -func init() { - caddy.RegisterPlugin("pprof", caddy.Plugin{ - ServerType: "dns", - Action: setup, - }) -} +func init() { plugin.Register("pprof", setup) } func setup(c *caddy.Controller) error { h := &handler{addr: defaultAddr} diff --git a/plugin/ready/setup.go b/plugin/ready/setup.go index eb1e1a8a463..3c0e2ce4379 100644 --- a/plugin/ready/setup.go +++ b/plugin/ready/setup.go @@ -9,12 +9,7 @@ import ( "github.com/caddyserver/caddy" ) -func init() { - caddy.RegisterPlugin("ready", caddy.Plugin{ - ServerType: "dns", - Action: setup, - }) -} +func init() { plugin.Register("ready", setup) } func setup(c *caddy.Controller) error { addr, err := parse(c) diff --git a/plugin/register.go b/plugin/register.go new file mode 100644 index 00000000000..2d74b262235 --- /dev/null +++ b/plugin/register.go @@ -0,0 +1,11 @@ +package plugin + +import "github.com/caddyserver/caddy" + +// Register registers your plugin with CoreDNS and allows it to be called when the server is running. 
+func Register(name string, action caddy.SetupFunc) { + caddy.RegisterPlugin(name, caddy.Plugin{ + ServerType: "dns", + Action: action, + }) +} diff --git a/plugin/reload/setup.go b/plugin/reload/setup.go index b52afe56079..c0dcc14e9fa 100644 --- a/plugin/reload/setup.go +++ b/plugin/reload/setup.go @@ -14,12 +14,7 @@ import ( var log = clog.NewWithPlugin("reload") -func init() { - caddy.RegisterPlugin("reload", caddy.Plugin{ - ServerType: "dns", - Action: setup, - }) -} +func init() { plugin.Register("reload", setup) } // the info reload is global to all application, whatever number of reloads. // it is used to transmit data between Setup and start of the hook called 'onInstanceStartup' diff --git a/plugin/rewrite/setup.go b/plugin/rewrite/setup.go index 00c0a2f2c99..6b6bcce6695 100644 --- a/plugin/rewrite/setup.go +++ b/plugin/rewrite/setup.go @@ -10,12 +10,7 @@ import ( var log = clog.NewWithPlugin("rewrite") -func init() { - caddy.RegisterPlugin("rewrite", caddy.Plugin{ - ServerType: "dns", - Action: setup, - }) -} +func init() { plugin.Register("rewrite", setup) } func setup(c *caddy.Controller) error { rewrites, err := rewriteParse(c) diff --git a/plugin/route53/setup.go b/plugin/route53/setup.go index 23fb5e74ff8..31169edafc8 100644 --- a/plugin/route53/setup.go +++ b/plugin/route53/setup.go @@ -25,9 +25,8 @@ import ( var log = clog.NewWithPlugin("route53") func init() { - caddy.RegisterPlugin("route53", caddy.Plugin{ - ServerType: "dns", - Action: func(c *caddy.Controller) error { + plugin.Register("route53", + func(c *caddy.Controller) error { f := func(credential *credentials.Credentials) route53iface.Route53API { return route53.New(session.Must(session.NewSession(&aws.Config{ Credentials: credential, @@ -35,7 +34,7 @@ func init() { } return setup(c, f) }, - }) + ) } func setup(c *caddy.Controller, f func(*credentials.Credentials) route53iface.Route53API) error { diff --git a/plugin/secondary/setup.go b/plugin/secondary/setup.go index 
401b2af7403..410bc097634 100644 --- a/plugin/secondary/setup.go +++ b/plugin/secondary/setup.go @@ -10,12 +10,7 @@ import ( "github.com/caddyserver/caddy" ) -func init() { - caddy.RegisterPlugin("secondary", caddy.Plugin{ - ServerType: "dns", - Action: setup, - }) -} +func init() { plugin.Register("secondary", setup) } func setup(c *caddy.Controller) error { zones, err := secondaryParse(c) diff --git a/plugin/sign/setup.go b/plugin/sign/setup.go index 6cc3b711dcc..8f4d4abd5c2 100644 --- a/plugin/sign/setup.go +++ b/plugin/sign/setup.go @@ -12,12 +12,7 @@ import ( "github.com/caddyserver/caddy" ) -func init() { - caddy.RegisterPlugin("sign", caddy.Plugin{ - ServerType: "dns", - Action: setup, - }) -} +func init() { plugin.Register("sign", setup) } func setup(c *caddy.Controller) error { sign, err := parse(c) diff --git a/plugin/template/setup.go b/plugin/template/setup.go index 045459ef1d4..908266f291d 100644 --- a/plugin/template/setup.go +++ b/plugin/template/setup.go @@ -12,12 +12,7 @@ import ( "github.com/miekg/dns" ) -func init() { - caddy.RegisterPlugin("template", caddy.Plugin{ - ServerType: "dns", - Action: setupTemplate, - }) -} +func init() { plugin.Register("template", setupTemplate) } func setupTemplate(c *caddy.Controller) error { handler, err := templateParse(c) diff --git a/plugin/trace/setup.go b/plugin/trace/setup.go index 87d9e7e2440..64931933a4b 100644 --- a/plugin/trace/setup.go +++ b/plugin/trace/setup.go @@ -11,12 +11,7 @@ import ( "github.com/caddyserver/caddy" ) -func init() { - caddy.RegisterPlugin("trace", caddy.Plugin{ - ServerType: "dns", - Action: setup, - }) -} +func init() { plugin.Register("trace", setup) } func setup(c *caddy.Controller) error { t, err := traceParse(c) diff --git a/plugin/whoami/setup.go b/plugin/whoami/setup.go index f0ae1e6d662..6c19b72d6b6 100644 --- a/plugin/whoami/setup.go +++ b/plugin/whoami/setup.go @@ -7,12 +7,7 @@ import ( "github.com/caddyserver/caddy" ) -func init() 
{ - caddy.RegisterPlugin("whoami", caddy.Plugin{ - ServerType: "dns", - Action: setup, - }) -} +func init() { plugin.Register("whoami", setup) } func setup(c *caddy.Controller) error { c.Next() // 'whoami' From 081e45afa3c72dcde92c771b5adac2e1521d278e Mon Sep 17 00:00:00 2001 From: Guangming Wang Date: Mon, 23 Sep 2019 21:40:14 +0800 Subject: [PATCH 199/324] cleanup: remove redundant return statement (#3297) Signed-off-by: Guangming Wang --- core/dnsserver/server.go | 1 - core/dnsserver/server_grpc.go | 5 ++--- core/dnsserver/server_https.go | 1 - core/dnsserver/server_tls.go | 1 - plugin/erratic/xfr.go | 1 - plugin/file/tree/tree.go | 1 - plugin/health/health.go | 1 - plugin/k8s_external/external_test.go | 2 +- plugin/kubernetes/external_test.go | 2 +- plugin/kubernetes/handler_test.go | 2 +- plugin/kubernetes/kubernetes_test.go | 2 +- plugin/kubernetes/ns_test.go | 2 +- plugin/kubernetes/reverse_test.go | 2 +- plugin/kubernetes/xfr.go | 1 - plugin/test/responsewriter.go | 4 ++-- 15 files changed, 10 insertions(+), 18 deletions(-) diff --git a/core/dnsserver/server.go b/core/dnsserver/server.go index 2f0454e48c0..a0d9bbc3af4 100644 --- a/core/dnsserver/server.go +++ b/core/dnsserver/server.go @@ -302,7 +302,6 @@ func (s *Server) OnStartupComplete() { if out != "" { fmt.Print(out) } - return } // Tracer returns the tracer in the server if defined. diff --git a/core/dnsserver/server_grpc.go b/core/dnsserver/server_grpc.go index 599f5c19759..e4f48ddec1f 100644 --- a/core/dnsserver/server_grpc.go +++ b/core/dnsserver/server_grpc.go @@ -93,7 +93,6 @@ func (s *ServergRPC) OnStartupComplete() { if out != "" { fmt.Print(out) } - return } // Stop stops the server. It blocks until the server is @@ -165,8 +164,8 @@ func (r *gRPCresponse) Write(b []byte) (int, error) { // These methods implement the dns.ResponseWriter interface from Go DNS. 
func (r *gRPCresponse) Close() error { return nil } func (r *gRPCresponse) TsigStatus() error { return nil } -func (r *gRPCresponse) TsigTimersOnly(b bool) { return } -func (r *gRPCresponse) Hijack() { return } +func (r *gRPCresponse) TsigTimersOnly(b bool) {} +func (r *gRPCresponse) Hijack() {} func (r *gRPCresponse) LocalAddr() net.Addr { return r.localAddr } func (r *gRPCresponse) RemoteAddr() net.Addr { return r.remoteAddr } func (r *gRPCresponse) WriteMsg(m *dns.Msg) error { r.Msg = m; return nil } diff --git a/core/dnsserver/server_https.go b/core/dnsserver/server_https.go index 93d62fa84ce..6b5a7238406 100644 --- a/core/dnsserver/server_https.go +++ b/core/dnsserver/server_https.go @@ -82,7 +82,6 @@ func (s *ServerHTTPS) OnStartupComplete() { if out != "" { fmt.Print(out) } - return } // Stop stops the server. It blocks until the server is totally stopped. diff --git a/core/dnsserver/server_tls.go b/core/dnsserver/server_tls.go index 95c2d691280..1bc6e6ac25f 100644 --- a/core/dnsserver/server_tls.go +++ b/core/dnsserver/server_tls.go @@ -78,5 +78,4 @@ func (s *ServerTLS) OnStartupComplete() { if out != "" { fmt.Print(out) } - return } diff --git a/plugin/erratic/xfr.go b/plugin/erratic/xfr.go index 8e2b3179493..eaaaf01fb22 100644 --- a/plugin/erratic/xfr.go +++ b/plugin/erratic/xfr.go @@ -48,5 +48,4 @@ func xfr(state request.Request, truncate bool) { tr.Out(state.W, state.Req, ch) state.W.Hijack() - return } diff --git a/plugin/file/tree/tree.go b/plugin/file/tree/tree.go index 3aeeba4d5d8..a6caafe163e 100644 --- a/plugin/file/tree/tree.go +++ b/plugin/file/tree/tree.go @@ -290,7 +290,6 @@ func (t *Tree) Delete(rr dns.RR) { if el.Empty() { t.deleteNode(rr) } - return } // DeleteNode deletes the node that matches rr according to Less(). 
diff --git a/plugin/health/health.go b/plugin/health/health.go index 2ac75f08d96..0db68eb7251 100644 --- a/plugin/health/health.go +++ b/plugin/health/health.go @@ -43,7 +43,6 @@ func (h *health) OnStartup() error { // We're always healthy. w.WriteHeader(http.StatusOK) io.WriteString(w, http.StatusText(http.StatusOK)) - return }) go func() { http.Serve(h.ln, h.mux) }() diff --git a/plugin/k8s_external/external_test.go b/plugin/k8s_external/external_test.go index 40c7cb92703..a83a2da720b 100644 --- a/plugin/k8s_external/external_test.go +++ b/plugin/k8s_external/external_test.go @@ -152,7 +152,7 @@ var tests = []test.Case{ type external struct{} func (external) HasSynced() bool { return true } -func (external) Run() { return } +func (external) Run() {} func (external) Stop() error { return nil } func (external) EpIndexReverse(string) []*object.Endpoints { return nil } func (external) SvcIndexReverse(string) []*object.Service { return nil } diff --git a/plugin/kubernetes/external_test.go b/plugin/kubernetes/external_test.go index dabbd94896b..7ccbb279886 100644 --- a/plugin/kubernetes/external_test.go +++ b/plugin/kubernetes/external_test.go @@ -79,7 +79,7 @@ func TestExternal(t *testing.T) { type external struct{} func (external) HasSynced() bool { return true } -func (external) Run() { return } +func (external) Run() {} func (external) Stop() error { return nil } func (external) EpIndexReverse(string) []*object.Endpoints { return nil } func (external) SvcIndexReverse(string) []*object.Service { return nil } diff --git a/plugin/kubernetes/handler_test.go b/plugin/kubernetes/handler_test.go index a979386b971..186c2a3a609 100644 --- a/plugin/kubernetes/handler_test.go +++ b/plugin/kubernetes/handler_test.go @@ -521,7 +521,7 @@ type APIConnServeTest struct { } func (a APIConnServeTest) HasSynced() bool { return !a.notSynced } -func (APIConnServeTest) Run() { return } +func (APIConnServeTest) Run() {} func (APIConnServeTest) Stop() error { return nil } func 
(APIConnServeTest) EpIndexReverse(string) []*object.Endpoints { return nil } func (APIConnServeTest) SvcIndexReverse(string) []*object.Service { return nil } diff --git a/plugin/kubernetes/kubernetes_test.go b/plugin/kubernetes/kubernetes_test.go index b259869a8b3..c0cf2535573 100644 --- a/plugin/kubernetes/kubernetes_test.go +++ b/plugin/kubernetes/kubernetes_test.go @@ -61,7 +61,7 @@ func TestEndpointHostname(t *testing.T) { type APIConnServiceTest struct{} func (APIConnServiceTest) HasSynced() bool { return true } -func (APIConnServiceTest) Run() { return } +func (APIConnServiceTest) Run() {} func (APIConnServiceTest) Stop() error { return nil } func (APIConnServiceTest) PodIndex(string) []*object.Pod { return nil } func (APIConnServiceTest) SvcIndexReverse(string) []*object.Service { return nil } diff --git a/plugin/kubernetes/ns_test.go b/plugin/kubernetes/ns_test.go index 5f9786a74ea..bafe53240e7 100644 --- a/plugin/kubernetes/ns_test.go +++ b/plugin/kubernetes/ns_test.go @@ -13,7 +13,7 @@ import ( type APIConnTest struct{} func (APIConnTest) HasSynced() bool { return true } -func (APIConnTest) Run() { return } +func (APIConnTest) Run() {} func (APIConnTest) Stop() error { return nil } func (APIConnTest) PodIndex(string) []*object.Pod { return nil } func (APIConnTest) SvcIndexReverse(string) []*object.Service { return nil } diff --git a/plugin/kubernetes/reverse_test.go b/plugin/kubernetes/reverse_test.go index 3c88fd4d51c..2af72522e8f 100644 --- a/plugin/kubernetes/reverse_test.go +++ b/plugin/kubernetes/reverse_test.go @@ -16,7 +16,7 @@ import ( type APIConnReverseTest struct{} func (APIConnReverseTest) HasSynced() bool { return true } -func (APIConnReverseTest) Run() { return } +func (APIConnReverseTest) Run() {} func (APIConnReverseTest) Stop() error { return nil } func (APIConnReverseTest) PodIndex(string) []*object.Pod { return nil } func (APIConnReverseTest) EpIndex(string) []*object.Endpoints { return nil } diff --git a/plugin/kubernetes/xfr.go 
b/plugin/kubernetes/xfr.go index 9ad6c9aca46..7759c5a47f8 100644 --- a/plugin/kubernetes/xfr.go +++ b/plugin/kubernetes/xfr.go @@ -184,7 +184,6 @@ func (k *Kubernetes) transfer(c chan dns.RR, zone string) { } } } - return } // emitAddressRecord generates a new A or AAAA record based on the msg.Service and writes it to diff --git a/plugin/test/responsewriter.go b/plugin/test/responsewriter.go index feaa8bd7e22..ce75657ce75 100644 --- a/plugin/test/responsewriter.go +++ b/plugin/test/responsewriter.go @@ -51,10 +51,10 @@ func (t *ResponseWriter) Close() error { return nil } func (t *ResponseWriter) TsigStatus() error { return nil } // TsigTimersOnly implement dns.ResponseWriter interface. -func (t *ResponseWriter) TsigTimersOnly(bool) { return } +func (t *ResponseWriter) TsigTimersOnly(bool) {} // Hijack implement dns.ResponseWriter interface. -func (t *ResponseWriter) Hijack() { return } +func (t *ResponseWriter) Hijack() {} // ResponseWriter6 returns fixed client and remote address in IPv6. The remote // address is always fe80::42:ff:feca:4c65 and port 40212. The local address is always ::1 and port 53. 
From 50bac4d3c3032005dc509ee4e95f1f0803c14c63 Mon Sep 17 00:00:00 2001 From: li mengyang Date: Tue, 24 Sep 2019 14:06:38 +0800 Subject: [PATCH 200/324] fix: delete unused var and const (#3294) Signed-off-by: hwdef --- coremain/run.go | 1 - plugin/dnssec/handler.go | 7 ------- plugin/dnstap/dnstapio/dnstap_encoder.go | 1 - plugin/forward/forward.go | 9 --------- plugin/forward/proxy.go | 1 - 5 files changed, 19 deletions(-) diff --git a/coremain/run.go b/coremain/run.go index 508c74c622a..b62878c3a03 100644 --- a/coremain/run.go +++ b/coremain/run.go @@ -184,7 +184,6 @@ func setVersion() { // Flags that control program flow or startup var ( conf string - logfile bool version bool plugins bool ) diff --git a/plugin/dnssec/handler.go b/plugin/dnssec/handler.go index 6153bf33110..a901b746a8f 100644 --- a/plugin/dnssec/handler.go +++ b/plugin/dnssec/handler.go @@ -55,13 +55,6 @@ var ( Help: "The number of elements in the dnssec cache.", }, []string{"server", "type"}) - cacheCapacity = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: plugin.Namespace, - Subsystem: "dnssec", - Name: "cache_capacity", - Help: "The dnssec cache's capacity.", - }, []string{"server", "type"}) - cacheHits = prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: plugin.Namespace, Subsystem: "dnssec", diff --git a/plugin/dnstap/dnstapio/dnstap_encoder.go b/plugin/dnstap/dnstapio/dnstap_encoder.go index 07dfc841349..65b15f5875c 100644 --- a/plugin/dnstap/dnstapio/dnstap_encoder.go +++ b/plugin/dnstap/dnstapio/dnstap_encoder.go @@ -11,7 +11,6 @@ import ( ) const ( - frameLenSize = 4 protobufSize = 1024 * 1024 ) diff --git a/plugin/forward/forward.go b/plugin/forward/forward.go index 4abd80baedb..4e54ef466f3 100644 --- a/plugin/forward/forward.go +++ b/plugin/forward/forward.go @@ -199,15 +199,6 @@ var ( ErrCachedClosed = errors.New("cached connection was closed by peer") ) -// policy tells forward what policy for selecting upstream it uses. 
-type policy int - -const ( - randomPolicy policy = iota - roundRobinPolicy - sequentialPolicy -) - // options holds various options that can be set. type options struct { forceTCP bool diff --git a/plugin/forward/proxy.go b/plugin/forward/proxy.go index bf4d68dca91..78b2cf3b827 100644 --- a/plugin/forward/proxy.go +++ b/plugin/forward/proxy.go @@ -80,6 +80,5 @@ func (p *Proxy) start(duration time.Duration) { const ( maxTimeout = 2 * time.Second - minTimeout = 200 * time.Millisecond hcInterval = 500 * time.Millisecond ) From 2881eacd068669e3655322f63d52b5acbc43b582 Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Tue, 24 Sep 2019 06:24:49 +0000 Subject: [PATCH 201/324] build(deps): bump gopkg.in/DataDog/dd-trace-go.v1 from 1.17.0 to 1.18.0 (#3295) Bumps [gopkg.in/DataDog/dd-trace-go.v1](https://github.com/DataDog/dd-trace-go) from 1.17.0 to 1.18.0. - [Release notes](https://github.com/DataDog/dd-trace-go/releases) - [Commits](https://github.com/DataDog/dd-trace-go/compare/v1.17.0...v1.18.0) Signed-off-by: dependabot-preview[bot] --- go.mod | 2 +- go.sum | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index e098479aa9f..86bcfe3f9de 100644 --- a/go.mod +++ b/go.mod @@ -44,7 +44,7 @@ require ( google.golang.org/api v0.10.0 google.golang.org/genproto v0.0.0-20190701230453-710ae3a149df // indirect google.golang.org/grpc v1.23.0 - gopkg.in/DataDog/dd-trace-go.v1 v1.17.0 + gopkg.in/DataDog/dd-trace-go.v1 v1.18.0 gopkg.in/inf.v0 v0.9.1 // indirect k8s.io/api v0.0.0-20190620084959-7cf5895f2711 k8s.io/apimachinery v0.0.0-20190612205821-1799e75a0719 diff --git a/go.sum b/go.sum index 3fbefaf6fd5..867337eda0c 100644 --- a/go.sum +++ b/go.sum @@ -438,6 +438,8 @@ google.golang.org/grpc v1.23.0 h1:AzbTB6ux+okLTzP8Ru1Xs41C303zdcfEht7MQnYJt5A= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= 
gopkg.in/DataDog/dd-trace-go.v1 v1.17.0 h1:j9vAp9Re9bbtA/QFehkJpNba/6W2IbJtNuXZophCa54= gopkg.in/DataDog/dd-trace-go.v1 v1.17.0/go.mod h1:DVp8HmDh8PuTu2Z0fVVlBsyWaC++fzwVCaGWylTe3tg= +gopkg.in/DataDog/dd-trace-go.v1 v1.18.0 h1:n0pmVs3TxCRC9Jrd1/aNaB8roW5ZHMw0UOWPxAoUkkI= +gopkg.in/DataDog/dd-trace-go.v1 v1.18.0/go.mod h1:DVp8HmDh8PuTu2Z0fVVlBsyWaC++fzwVCaGWylTe3tg= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= From 47719756fed4946db4b1d3e9ca36d09e2c360cad Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Tue, 24 Sep 2019 06:25:14 +0000 Subject: [PATCH 202/324] build(deps): bump github.com/aws/aws-sdk-go from 1.23.21 to 1.24.3 (#3296) Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.23.21 to 1.24.3. 
- [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Changelog](https://github.com/aws/aws-sdk-go/blob/master/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.23.21...v1.24.3) Signed-off-by: dependabot-preview[bot] --- go.mod | 2 +- go.sum | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index 86bcfe3f9de..ee47cc0bad5 100644 --- a/go.mod +++ b/go.mod @@ -10,7 +10,7 @@ require ( github.com/Azure/go-autorest/autorest/to v0.2.0 // indirect github.com/Shopify/sarama v1.21.0 // indirect github.com/apache/thrift v0.12.0 // indirect - github.com/aws/aws-sdk-go v1.23.21 + github.com/aws/aws-sdk-go v1.24.3 github.com/caddyserver/caddy v1.0.3 github.com/coredns/federation v0.0.0-20190818181423-e032b096babe github.com/coreos/go-systemd v0.0.0-20190212144455-93d5ec2c7f76 // indirect diff --git a/go.sum b/go.sum index 867337eda0c..9f4a9740bfe 100644 --- a/go.sum +++ b/go.sum @@ -47,6 +47,8 @@ github.com/apache/thrift v0.12.0 h1:pODnxUFNcjP9UTLZGTdeh+j16A8lJbRvD3rOtrk/7bs= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/aws/aws-sdk-go v1.23.21 h1:eVJT2C99cAjZlBY8+CJovf6AwrSANzAcYNuxdCB+SPk= github.com/aws/aws-sdk-go v1.23.21/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.24.3 h1:113A33abx/cqv0ga94D8z9elza1YEm749ltJI67Uhq4= +github.com/aws/aws-sdk-go v1.24.3/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= From eb59e79207446dcf8f80c4fa5715b6a181b13013 Mon Sep 17 00:00:00 2001 From: 
Guangming Wang Date: Wed, 25 Sep 2019 20:23:43 +0800 Subject: [PATCH 203/324] plugin: cleanup code based on staticcheck warnings (#3302) TrimPrefix re-assign to former variable Signed-off-by: Guangming Wang --- plugin/acl/acl_test.go | 4 +--- plugin/dnstap/setup.go | 4 +--- plugin/etcd/etcd.go | 2 +- plugin/file/xfr.go | 2 +- plugin/hosts/hostsfile.go | 4 ++-- 5 files changed, 6 insertions(+), 10 deletions(-) diff --git a/plugin/acl/acl_test.go b/plugin/acl/acl_test.go index 9b23edc5384..ff3d86e1a0b 100644 --- a/plugin/acl/acl_test.go +++ b/plugin/acl/acl_test.go @@ -27,9 +27,7 @@ func (t *testResponseWriter) WriteMsg(m *dns.Msg) error { func NewTestControllerWithZones(input string, zones []string) *caddy.Controller { ctr := caddy.NewTestController("dns", input) - for _, zone := range zones { - ctr.ServerBlockKeys = append(ctr.ServerBlockKeys, zone) - } + ctr.ServerBlockKeys = append(ctr.ServerBlockKeys, zones...) return ctr } diff --git a/plugin/dnstap/setup.go b/plugin/dnstap/setup.go index 5baec5c6e92..06f26428c21 100644 --- a/plugin/dnstap/setup.go +++ b/plugin/dnstap/setup.go @@ -46,9 +46,7 @@ func parseConfig(d *caddyfile.Dispenser) (c config, err error) { c.target = servers[0] } else { // default to UNIX socket - if strings.HasPrefix(c.target, "unix://") { - c.target = c.target[7:] - } + c.target = strings.TrimPrefix(c.target, "unix://") c.socket = true } diff --git a/plugin/etcd/etcd.go b/plugin/etcd/etcd.go index a39cb0e62b6..305e8e492ee 100644 --- a/plugin/etcd/etcd.go +++ b/plugin/etcd/etcd.go @@ -83,7 +83,7 @@ func (e *Etcd) Records(ctx context.Context, state request.Request, exact bool) ( func (e *Etcd) get(ctx context.Context, path string, recursive bool) (*etcdcv3.GetResponse, error) { ctx, cancel := context.WithTimeout(ctx, etcdTimeout) defer cancel() - if recursive == true { + if recursive { if !strings.HasSuffix(path, "/") { path = path + "/" } diff --git a/plugin/file/xfr.go b/plugin/file/xfr.go index 659c5c9c3e6..f7192165ba2 100644 --- 
a/plugin/file/xfr.go +++ b/plugin/file/xfr.go @@ -45,8 +45,8 @@ func (x Xfr) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (in ch := make(chan *dns.Envelope) tr := new(dns.Transfer) wg := new(sync.WaitGroup) + wg.Add(1) go func() { - wg.Add(1) tr.Out(w, r, ch) wg.Done() }() diff --git a/plugin/hosts/hostsfile.go b/plugin/hosts/hostsfile.go index ed9763de17d..19cb7e7b3f5 100644 --- a/plugin/hosts/hostsfile.go +++ b/plugin/hosts/hostsfile.go @@ -244,8 +244,8 @@ func (h *Hostsfile) LookupStaticAddr(addr string) []string { h.RLock() defer h.RUnlock() - hosts1, _ := h.hmap.addr[addr] - hosts2, _ := h.inline.addr[addr] + hosts1 := h.hmap.addr[addr] + hosts2 := h.inline.addr[addr] if len(hosts1) == 0 && len(hosts2) == 0 { return nil From 27e22b069683b4477b4509d88ea57c8963e90ad1 Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Wed, 25 Sep 2019 18:18:54 +0100 Subject: [PATCH 204/324] Use strings.ToLower in server (#3304) Automatically submitted. --- core/dnsserver/server.go | 26 +++++++++----------------- 1 file changed, 9 insertions(+), 17 deletions(-) diff --git a/core/dnsserver/server.go b/core/dnsserver/server.go index a0d9bbc3af4..587f219ddc9 100644 --- a/core/dnsserver/server.go +++ b/core/dnsserver/server.go @@ -6,6 +6,7 @@ import ( "fmt" "net" "runtime" + "strings" "sync" "time" @@ -217,27 +218,18 @@ func (s *Server) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) return } - q := r.Question[0].Name - b := make([]byte, len(q)) - var off int - var end bool - - var dshandler *Config - // Wrap the response writer in a ScrubWriter so we automatically make the reply fit in the client's buffer. 
w = request.NewScrubWriter(r, w) - for { - l := len(q[off:]) - for i := 0; i < l; i++ { - b[i] = q[off+i] - // normalize the name for the lookup - if b[i] >= 'A' && b[i] <= 'Z' { - b[i] |= ('a' - 'A') - } - } + q := strings.ToLower(r.Question[0].Name) + var ( + off int + end bool + dshandler *Config + ) - if h, ok := s.zones[string(b[:l])]; ok { + for { + if h, ok := s.zones[q[off:]]; ok { if r.Question[0].Qtype != dns.TypeDS { if h.FilterFunc == nil { rcode, _ := h.pluginChain.ServeDNS(ctx, w, r) From 9a5e4fa1a0cc9ee40f7cad6518eccc2d78839370 Mon Sep 17 00:00:00 2001 From: xieyanker Date: Thu, 26 Sep 2019 20:19:45 +0800 Subject: [PATCH 205/324] fix mis-spelling (#3310) Signed-off-by: xieyanker --- plugin/autopath/autopath.go | 2 +- plugin/cache/cache.go | 2 +- plugin/dnssec/dnskey.go | 4 ++-- plugin/forward/fuzz.go | 2 +- plugin/kubernetes/autopath.go | 2 +- plugin/kubernetes/object/service.go | 2 +- plugin/loadbalance/handler.go | 4 ++-- plugin/ready/list.go | 2 +- plugin/rewrite/edns0.go | 2 +- plugin/rewrite/rewrite.go | 2 +- plugin/secondary/setup_test.go | 2 +- 11 files changed, 13 insertions(+), 13 deletions(-) diff --git a/plugin/autopath/autopath.go b/plugin/autopath/autopath.go index c2f541644f7..8583988d9ba 100644 --- a/plugin/autopath/autopath.go +++ b/plugin/autopath/autopath.go @@ -5,7 +5,7 @@ client's search path resolution by performing these lookups on the server... The server has a copy (via AutoPathFunc) of the client's search path and on receiving a query it first establishes if the suffix matches the FIRST configured element. If no match can be found the query will be forwarded up the plugin -chain without interference (iff 'fallthrough' has been set). +chain without interference (if 'fallthrough' has been set). 
If the query is deemed to fall in the search path the server will perform the queries with each element of the search path appended in sequence until a diff --git a/plugin/cache/cache.go b/plugin/cache/cache.go index 69de55f17de..43a40c40921 100644 --- a/plugin/cache/cache.go +++ b/plugin/cache/cache.go @@ -15,7 +15,7 @@ import ( "github.com/miekg/dns" ) -// Cache is plugin that looks up responses in a cache and caches replies. +// Cache is a plugin that looks up responses in a cache and caches replies. // It has a success and a denial of existence cache. type Cache struct { Next plugin.Handler diff --git a/plugin/dnssec/dnskey.go b/plugin/dnssec/dnskey.go index ea769514db3..1a2cf93438b 100644 --- a/plugin/dnssec/dnskey.go +++ b/plugin/dnssec/dnskey.go @@ -79,12 +79,12 @@ func (d Dnssec) getDNSKEY(state request.Request, zone string, do bool, server st return m } -// Return true iff this is a zone key with the SEP bit unset. This implies a ZSK (rfc4034 2.1.1). +// Return true if this is a zone key with the SEP bit unset. This implies a ZSK (rfc4034 2.1.1). func (k DNSKEY) isZSK() bool { return k.K.Flags&(1<<8) == (1<<8) && k.K.Flags&1 == 0 } -// Return true iff this is a zone key with the SEP bit set. This implies a KSK (rfc4034 2.1.1). +// Return true if this is a zone key with the SEP bit set. This implies a KSK (rfc4034 2.1.1). func (k DNSKEY) isKSK() bool { return k.K.Flags&(1<<8) == (1<<8) && k.K.Flags&1 == 1 } diff --git a/plugin/forward/fuzz.go b/plugin/forward/fuzz.go index cbb4edf01d4..0482f63adbf 100644 --- a/plugin/forward/fuzz.go +++ b/plugin/forward/fuzz.go @@ -11,7 +11,7 @@ import ( var f *Forward -// abuse init to setup a environment to test against. This start another server to that will +// abuse init to setup an environment to test against. This start another server to that will // reflect responses. 
func init() { f = New() diff --git a/plugin/kubernetes/autopath.go b/plugin/kubernetes/autopath.go index 06bde0a6904..33bf401f59b 100644 --- a/plugin/kubernetes/autopath.go +++ b/plugin/kubernetes/autopath.go @@ -49,7 +49,7 @@ func (k *Kubernetes) AutoPath(state request.Request) []string { return search } -// podWithIP return the api.Pod for source IP ip. It returns nil if nothing can be found. +// podWithIP return the api.Pod for source IP. It returns nil if nothing can be found. func (k *Kubernetes) podWithIP(ip string) *object.Pod { ps := k.APIConn.PodIndex(ip) if len(ps) == 0 { diff --git a/plugin/kubernetes/object/service.go b/plugin/kubernetes/object/service.go index 3c830436248..a41100ab927 100644 --- a/plugin/kubernetes/object/service.go +++ b/plugin/kubernetes/object/service.go @@ -46,7 +46,7 @@ func ToService(obj interface{}) interface{} { } if len(svc.Spec.Ports) == 0 { - // Add sentinal if there are no ports. + // Add sentinel if there are no ports. s.Ports = []api.ServicePort{{Port: -1}} } else { s.Ports = make([]api.ServicePort, len(svc.Spec.Ports)) diff --git a/plugin/loadbalance/handler.go b/plugin/loadbalance/handler.go index 4ec79c098f8..ac046c8d073 100644 --- a/plugin/loadbalance/handler.go +++ b/plugin/loadbalance/handler.go @@ -1,4 +1,4 @@ -// Package loadbalance is plugin for rewriting responses to do "load balancing" +// Package loadbalance is a plugin for rewriting responses to do "load balancing" package loadbalance import ( @@ -9,7 +9,7 @@ import ( "github.com/miekg/dns" ) -// RoundRobin is plugin to rewrite responses for "load balancing". +// RoundRobin is a plugin to rewrite responses for "load balancing". type RoundRobin struct { Next plugin.Handler } diff --git a/plugin/ready/list.go b/plugin/ready/list.go index c283748eca8..e7d2584d878 100644 --- a/plugin/ready/list.go +++ b/plugin/ready/list.go @@ -6,7 +6,7 @@ import ( "sync" ) -// list is structure that holds the plugins that signals readiness for this server block. 
+// list is a structure that holds the plugins that signals readiness for this server block. type list struct { sync.RWMutex rs []Readiness diff --git a/plugin/rewrite/edns0.go b/plugin/rewrite/edns0.go index 34aaf3d6772..2872d089ac8 100644 --- a/plugin/rewrite/edns0.go +++ b/plugin/rewrite/edns0.go @@ -1,4 +1,4 @@ -// Package rewrite is plugin for rewriting requests internally to something different. +// Package rewrite is a plugin for rewriting requests internally to something different. package rewrite import ( diff --git a/plugin/rewrite/rewrite.go b/plugin/rewrite/rewrite.go index e72e975b26a..2eabf6df280 100644 --- a/plugin/rewrite/rewrite.go +++ b/plugin/rewrite/rewrite.go @@ -29,7 +29,7 @@ const ( Continue = "continue" ) -// Rewrite is plugin to rewrite requests internally before being handled. +// Rewrite is a plugin to rewrite requests internally before being handled. type Rewrite struct { Next plugin.Handler Rules []Rule diff --git a/plugin/secondary/setup_test.go b/plugin/secondary/setup_test.go index 47c8abac97f..7fc36f67907 100644 --- a/plugin/secondary/setup_test.go +++ b/plugin/secondary/setup_test.go @@ -55,7 +55,7 @@ func TestSecondaryParse(t *testing.T) { } } - // This is only set *iff* we have a zone (i.e. not in all tests above) + // This is only set *if* we have a zone (i.e. not in all tests above) for _, v := range s.Z { if x := v.TransferFrom[0]; x != test.transferFrom { t.Fatalf("Test %d transform from names don't match expected %q, but got %q", i, test.transferFrom, x) From f2df37a1fe87d9a17f31ff8db3ea9c3387d10523 Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Fri, 27 Sep 2019 11:09:59 +0100 Subject: [PATCH 206/324] plugin/forward: metrics: make docs reflect reality (#3311) Remove talk about labels that are not added. 
Signed-off-by: Miek Gieben --- plugin/forward/README.md | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/plugin/forward/README.md b/plugin/forward/README.md index 92ffb665527..ae7ce67ba83 100644 --- a/plugin/forward/README.md +++ b/plugin/forward/README.md @@ -104,9 +104,8 @@ If monitoring is enabled (via the *prometheus* directive) then the following met and we are randomly (this always uses the `random` policy) spraying to an upstream. * `coredns_forward_socket_count_total{to}` - number of cached sockets per upstream. -Where `to` is one of the upstream servers (**TO** from the config), `proto` is the protocol used by -the incoming query ("tcp" or "udp"), and family the transport family ("1" for IPv4, and "2" for -IPv6). +Where `to` is one of the upstream servers (**TO** from the config), `rcode` is the returned RCODE +from the upstream. ## Examples From 8af468558766a80af7025d6480389538a1c2c0ea Mon Sep 17 00:00:00 2001 From: Guangming Wang Date: Fri, 27 Sep 2019 18:10:34 +0800 Subject: [PATCH 207/324] cleanup code by lint (#3312) Signed-off-by: Guangming Wang --- plugin/kubernetes/ns.go | 3 +-- plugin/rewrite/name.go | 9 ++------- plugin/rewrite/reverter.go | 4 +--- plugin/rewrite/rewrite_test.go | 2 +- request/request.go | 2 +- 5 files changed, 6 insertions(+), 14 deletions(-) diff --git a/plugin/kubernetes/ns.go b/plugin/kubernetes/ns.go index f21890cc994..2d4bc398a5c 100644 --- a/plugin/kubernetes/ns.go +++ b/plugin/kubernetes/ns.go @@ -65,8 +65,7 @@ func (k *Kubernetes) nsAddrs(external bool, zone string) []dns.RR { } // Create an RR slice of collected IPs - var rrs []dns.RR - rrs = make([]dns.RR, len(svcIPs)) + rrs := make([]dns.RR, len(svcIPs)) for i, ip := range svcIPs { if ip.To4() == nil { rr := new(dns.AAAA) diff --git a/plugin/rewrite/name.go b/plugin/rewrite/name.go index e1c2a11143c..9302d563a3b 100644 --- a/plugin/rewrite/name.go +++ b/plugin/rewrite/name.go @@ -104,9 +104,7 @@ func (rule *regexNameRule) Rewrite(ctx 
context.Context, state request.Request) R s := rule.Replacement for groupIndex, groupValue := range regexGroups { groupIndexStr := "{" + strconv.Itoa(groupIndex) + "}" - if strings.Contains(s, groupIndexStr) { - s = strings.Replace(s, groupIndexStr, groupValue, -1) - } + s = strings.Replace(s, groupIndexStr, groupValue, -1) } state.Req.Question[0].Name = s return RewriteDone @@ -264,10 +262,7 @@ func (rule *regexNameRule) GetResponseRule() ResponseRule { return rule.Response // hasClosingDot return true if s has a closing dot at the end. func hasClosingDot(s string) bool { - if strings.HasSuffix(s, ".") { - return true - } - return false + return strings.HasSuffix(s, ".") } // getSubExprUsage return the number of subexpressions used in s. diff --git a/plugin/rewrite/reverter.go b/plugin/rewrite/reverter.go index 5a55c7ad5e4..00e41454e4a 100644 --- a/plugin/rewrite/reverter.go +++ b/plugin/rewrite/reverter.go @@ -59,9 +59,7 @@ func (r *ResponseReverter) WriteMsg(res *dns.Msg) error { s := rule.Replacement for groupIndex, groupValue := range regexGroups { groupIndexStr := "{" + strconv.Itoa(groupIndex) + "}" - if strings.Contains(s, groupIndexStr) { - s = strings.Replace(s, groupIndexStr, groupValue, -1) - } + s = strings.Replace(s, groupIndexStr, groupValue, -1) } name = s isNameRewritten = true diff --git a/plugin/rewrite/rewrite_test.go b/plugin/rewrite/rewrite_test.go index 31692195438..ecab0778708 100644 --- a/plugin/rewrite/rewrite_test.go +++ b/plugin/rewrite/rewrite_test.go @@ -422,7 +422,7 @@ func optsEqual(a, b []dns.EDNS0) bool { if aa.SourceScope != bb.SourceScope { return false } - if !bytes.Equal(aa.Address, bb.Address) { + if !aa.Address.Equal(bb.Address) { return false } } else { diff --git a/request/request.go b/request/request.go index 0ce34816cd7..6f1a1de0e5f 100644 --- a/request/request.go +++ b/request/request.go @@ -358,7 +358,7 @@ func (r *Request) Match(reply *dns.Msg) bool { return false } - if reply.Response == false { + if !reply.Response { 
return false } From 7328d3e8c989f28a4b3480c907fca8e7d43aa76a Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Fri, 27 Sep 2019 11:10:47 +0100 Subject: [PATCH 208/324] plugin/reload: reflow documentation (#3313) * plugin/reload: reflow documentation For some reason these we're all bullets points, which made for awkward reading. Signed-off-by: Miek Gieben * typo Signed-off-by: Miek Gieben * Update plugin/reload/README.md Co-Authored-By: Michael Grosser --- plugin/reload/README.md | 11 ++++++----- plugin/reload/reload.go | 2 +- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/plugin/reload/README.md b/plugin/reload/README.md index ca80f8602f1..f50116cf437 100644 --- a/plugin/reload/README.md +++ b/plugin/reload/README.md @@ -38,11 +38,12 @@ This plugin can only be used once per Server Block. reload [INTERVAL] [JITTER] ~~~ -* The plugin will check for changes every **INTERVAL**, subject to +/- the **JITTER** duration -* **INTERVAL** and **JITTER** are Golang (durations)[https://golang.org/pkg/time/#ParseDuration] -* Default **INTERVAL** is 30s, default **JITTER** is 15s -* Minimal value for **INTERVAL** is 2s, and for **JITTER** is 1s -* If **JITTER** is more than half of **INTERVAL**, it will be set to half of **INTERVAL** +The plugin will check for changes every **INTERVAL**, subject to +/- the **JITTER** duration. + +* **INTERVAL** and **JITTER** are Golang (durations)[[https://golang.org/pkg/time/#ParseDuration](https://golang.org/pkg/time/#ParseDuration)]. + The default **INTERVAL** is 30s, default **JITTER** is 15s, the minimal value for **INTERVAL** + is 2s, and for **JITTER** it is 1s. If **JITTER** is more than half of **INTERVAL**, it will be + set to half of **INTERVAL** ## Examples diff --git a/plugin/reload/reload.go b/plugin/reload/reload.go index 0e01f63b4e5..817582c8c3b 100644 --- a/plugin/reload/reload.go +++ b/plugin/reload/reload.go @@ -1,3 +1,4 @@ +// Package reload periodically checks if the Corefile has changed, and reloads if so. 
package reload import ( @@ -11,7 +12,6 @@ import ( "github.com/caddyserver/caddy/caddyfile" ) -// reload periodically checks if the Corefile has changed, and reloads if so const ( unused = 0 maybeUsed = 1 From 4568a3c25af8540c33fa59ca38ed82b572235d83 Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Fri, 27 Sep 2019 13:30:22 +0100 Subject: [PATCH 209/324] doc: run make -f Makefile.doc (#3314) add the acl manual page; mechanical change otherwise. Signed-off-by: Miek Gieben --- man/coredns-acl.7 | 105 +++++++++++++++++++++++++++++++++++++ man/coredns-any.7 | 6 +-- man/coredns-auto.7 | 6 +-- man/coredns-autopath.7 | 2 +- man/coredns-azure.7 | 4 +- man/coredns-cache.7 | 14 ++--- man/coredns-cancel.7 | 10 ++-- man/coredns-chaos.7 | 4 +- man/coredns-clouddns.7 | 6 +-- man/coredns-debug.7 | 2 +- man/coredns-dnssec.7 | 6 +-- man/coredns-dnstap.7 | 6 +-- man/coredns-erratic.7 | 15 +++--- man/coredns-errors.7 | 10 ++-- man/coredns-etcd.7 | 26 +++++---- man/coredns-file.7 | 2 +- man/coredns-forward.7 | 7 ++- man/coredns-grpc.7 | 2 +- man/coredns-health.7 | 2 +- man/coredns-hosts.7 | 17 ++++-- man/coredns-import.7 | 8 +-- man/coredns-k8s_external.7 | 20 +++---- man/coredns-kubernetes.7 | 10 ++-- man/coredns-loadbalance.7 | 10 ++-- man/coredns-log.7 | 4 +- man/coredns-loop.7 | 4 +- man/coredns-metadata.7 | 14 ++--- man/coredns-metrics.7 | 2 +- man/coredns-nsid.7 | 6 +-- man/coredns-pprof.7 | 2 +- man/coredns-ready.7 | 2 +- man/coredns-reload.7 | 22 ++++---- man/coredns-rewrite.7 | 2 +- man/coredns-route53.7 | 15 +++--- man/coredns-secondary.7 | 2 +- man/coredns-sign.7 | 4 +- man/coredns-template.7 | 2 +- man/coredns-tls.7 | 10 ++-- man/coredns-whoami.7 | 4 +- 39 files changed, 259 insertions(+), 136 deletions(-) create mode 100644 man/coredns-acl.7 diff --git a/man/coredns-acl.7 b/man/coredns-acl.7 new file mode 100644 index 00000000000..9a2ded0dd59 --- /dev/null +++ b/man/coredns-acl.7 @@ -0,0 +1,105 @@ +.\" Generated by Mmark Markdown Processer - mmark.miek.nl +.TH 
With \fB\fCacl\fR enabled, users are able to block suspicious DNS queries by configuring IP filter rule sets, i.e. allowing authorized queries to recurse or blocking unauthorized queries.
{ + acl { + allow net 192.168.0.0/16 192.168.1.0/24 + block + } +} + +.fi +.RE + +.PP +Block all DNS queries from 192.168.1.0/24 towards a.example.org: + +.PP +.RS + +.nf +example.org { + acl a.example.org { + block net 192.168.1.0/24 + } +} + +.fi +.RE + diff --git a/man/coredns-any.7 b/man/coredns-any.7 index 2fc03ec09a7..d79953bc25e 100644 --- a/man/coredns-any.7 +++ b/man/coredns-any.7 @@ -1,9 +1,9 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-ANY" 7 "August 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-ANY" 7 "September 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP -\fIany\fP - give a minimal response to ANY queries. +\fIany\fP - gives a minimal response to ANY queries. .SH "DESCRIPTION" .PP @@ -41,7 +41,7 @@ A \fB\fCdig +nocmd ANY example.org +noall +answer\fR now returns: .RS .nf -example.org. 8482 IN HINFO "ANY obsoleted" "See RFC 8482" +example.org. 8482 IN HINFO "ANY obsoleted" "See RFC 8482" .fi .RE diff --git a/man/coredns-auto.7 b/man/coredns-auto.7 index fcd67317550..22d4555bc65 100644 --- a/man/coredns-auto.7 +++ b/man/coredns-auto.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-AUTO" 7 "August 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-AUTO" 7 "September 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP @@ -77,8 +77,8 @@ notifies to 10.240.1.1 .RS .nf -\&. 
{ - auto org { +org { + auto { directory /etc/coredns/zones/org transfer to * transfer to 10.240.1.1 diff --git a/man/coredns-autopath.7 b/man/coredns-autopath.7 index e96540e65de..4c3304b2904 100644 --- a/man/coredns-autopath.7 +++ b/man/coredns-autopath.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-AUTOPATH" 7 "August 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-AUTOPATH" 7 "September 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-azure.7 b/man/coredns-azure.7 index 2a4bdcd0129..42527ae20a1 100644 --- a/man/coredns-azure.7 +++ b/man/coredns-azure.7 @@ -1,5 +1,5 @@ -.\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-AZURE" 7 "August 2019" "CoreDNS" "CoreDNS Plugins" +.\" Generated by Mmark Markdown Processer - mmark.miek.nl +.TH "COREDNS-AZURE" 7 "September 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-cache.7 b/man/coredns-cache.7 index a229de73b40..1707f65b6d7 100644 --- a/man/coredns-cache.7 +++ b/man/coredns-cache.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-CACHE" 7 "August 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-CACHE" 7 "September 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP @@ -133,18 +133,18 @@ Proxy to Google Public DNS and only cache responses for example.org (or below). .RE .PP -Enable caching for all zones, keep a positive cache size of 5000 and a negative cache size of 2500: +Enable caching for \fB\fCexample.org\fR, keep a positive cache size of 5000 and a negative cache size of 2500: .PP .RS .nf - . 
{ - cache { - success 5000 - denial 2500 +example.org { + cache { + success 5000 + denial 2500 } - } +} .fi .RE diff --git a/man/coredns-cancel.7 b/man/coredns-cancel.7 index 29480a9339f..a628d9ad05d 100644 --- a/man/coredns-cancel.7 +++ b/man/coredns-cancel.7 @@ -1,9 +1,9 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-CANCEL" 7 "August 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-CANCEL" 7 "September 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP -\fIcancel\fP - a plugin that cancels a request's context after 5001 milliseconds. +\fIcancel\fP - cancels a request's context after 5001 milliseconds. .SH "DESCRIPTION" .PP @@ -11,7 +11,7 @@ The \fIcancel\fP plugin creates a canceling context for each request. It adds a triggered after 5001 milliseconds. .PP -The 5001 number is chosen because the default timeout for DNS clients is 5 seconds, after that they +The 5001 number was chosen because the default timeout for DNS clients is 5 seconds, after that they give up. .PP @@ -37,7 +37,7 @@ cancel [TIMEOUT] .RS .nf -\&. { +example.org { cancel whoami } @@ -52,7 +52,7 @@ Or with a custom timeout: .RS .nf -\&. { +example.org { cancel 1s whoami } diff --git a/man/coredns-chaos.7 b/man/coredns-chaos.7 index 72bb0b2f1b6..1769f3d2dc2 100644 --- a/man/coredns-chaos.7 +++ b/man/coredns-chaos.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-CHAOS" 7 "August 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-CHAOS" 7 "September 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP @@ -70,7 +70,7 @@ And test with \fB\fCdig\fR: % dig @localhost CH TXT version.bind \&... ;; ANSWER SECTION: -version.bind. 0 CH TXT "CoreDNS\-001" +version.bind. 0 CH TXT "CoreDNS\-001" \&... 
.fi diff --git a/man/coredns-clouddns.7 b/man/coredns-clouddns.7 index 625af666a4a..c09af441edc 100644 --- a/man/coredns-clouddns.7 +++ b/man/coredns-clouddns.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-CLOUDDNS" 7 "August 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-CLOUDDNS" 7 "September 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP @@ -60,7 +60,7 @@ Enable clouddns with implicit GCP credentials and resolve CNAMEs via 10.0.0.1: .RS .nf -\&. { +example.org { clouddns example.org.:gcp\-example\-project:example\-zone forward . 10.0.0.1 } @@ -75,7 +75,7 @@ Enable clouddns with fallthrough: .RS .nf -\&. { +example.org { clouddns example.org.:gcp\-example\-project:example\-zone clouddns example.com.:gcp\-example\-project:example\-zone\-2 { fallthrough example.gov. } diff --git a/man/coredns-debug.7 b/man/coredns-debug.7 index b4f857a6e55..a9eb0451121 100644 --- a/man/coredns-debug.7 +++ b/man/coredns-debug.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-DEBUG" 7 "August 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-DEBUG" 7 "September 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-dnssec.7 b/man/coredns-dnssec.7 index 373923a0999..df17d282668 100644 --- a/man/coredns-dnssec.7 +++ b/man/coredns-dnssec.7 @@ -1,13 +1,13 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-DNSSEC" 7 "August 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-DNSSEC" 7 "September 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP -\fIdnssec\fP - enable on-the-fly DNSSEC signing of served data. +\fIdnssec\fP - enables on-the-fly DNSSEC signing of served data. .SH "DESCRIPTION" .PP -With \fIdnssec\fP any reply that doesn't (or can't) do DNSSEC will get signed on the fly. Authenticated +With \fIdnssec\fP, any reply that doesn't (or can't) do DNSSEC will get signed on the fly. Authenticated denial of existence is implemented with NSEC black lies. 
Using ECDSA as an algorithm is preferred as this leads to smaller signatures (compared to RSA). NSEC3 is \fInot\fP supported. diff --git a/man/coredns-dnstap.7 b/man/coredns-dnstap.7 index c0e95412eba..60fa63a49b9 100644 --- a/man/coredns-dnstap.7 +++ b/man/coredns-dnstap.7 @@ -1,13 +1,13 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-DNSTAP" 7 "August 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-DNSTAP" 7 "September 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP -\fIdnstap\fP - enable logging to dnstap. +\fIdnstap\fP - enables logging to dnstap. .SH "DESCRIPTION" .PP -dnstap is a flexible, structured binary log format for DNS software: http://dnstap.info +dnstap is a flexible, structured binary log format for DNS software; see http://dnstap.info \[la]http://dnstap.info\[ra]. With this plugin you make CoreDNS output dnstap logging. diff --git a/man/coredns-erratic.7 b/man/coredns-erratic.7 index 782a976fa18..61688ef9d78 100644 --- a/man/coredns-erratic.7 +++ b/man/coredns-erratic.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-ERRATIC" 7 "August 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-ERRATIC" 7 "September 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP @@ -55,7 +55,7 @@ This plugin reports readiness to the ready plugin. .RS .nf -\&. { +example.org { erratic { drop 3 } @@ -71,7 +71,7 @@ Or even shorter if the defaults suits you. Note this only drops queries, it does .RS .nf -\&. { +example.org { erratic } @@ -85,7 +85,7 @@ Delay 1 in 3 queries for 50ms .RS .nf -\&. { +example.org { erratic { delay 3 50ms } @@ -101,7 +101,7 @@ Delay 1 in 3 and truncate 1 in 5. .RS .nf -\&. { +example.org { erratic { delay 3 5ms truncate 5 @@ -118,7 +118,7 @@ Drop every second query. .RS .nf -\&. { +example.org { erratic { drop 2 truncate 2 @@ -131,7 +131,6 @@ Drop every second query. 
.SH "ALSO SEE" .PP RFC 3849 -\[la]https://tools.ietf.org/html/rfc3849\[ra] and -RFC 5737 +\[la]https://tools.ietf.org/html/rfc3849\[ra] and RFC 5737 \[la]https://tools.ietf.org/html/rfc5737\[ra]. diff --git a/man/coredns-errors.7 b/man/coredns-errors.7 index 8a159468c39..072f1c3ba0c 100644 --- a/man/coredns-errors.7 +++ b/man/coredns-errors.7 @@ -1,9 +1,9 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-ERRORS" 7 "August 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-ERRORS" 7 "September 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP -\fIerrors\fP - enable error logging. +\fIerrors\fP - enables error logging. .SH "DESCRIPTION" .PP @@ -33,7 +33,7 @@ Extra knobs are available with an expanded syntax: .nf errors { - consolidate DURATION REGEXP + consolidate DURATION REGEXP } .fi @@ -59,13 +59,13 @@ For better performance, it's recommended to use the \fB\fC^\fR or \fB\fC$\fR met .SH "EXAMPLES" .PP -Use the \fIwhoami\fP to respond to queries and Log errors to standard output. +Use the \fIwhoami\fP to respond to queries in the example.org domain and Log errors to standard output. .PP .RS .nf -\&. { +example.org { whoami errors } diff --git a/man/coredns-etcd.7 b/man/coredns-etcd.7 index 57d8a2e5c1e..8221ed63e2a 100644 --- a/man/coredns-etcd.7 +++ b/man/coredns-etcd.7 @@ -1,9 +1,9 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-ETCD" 7 "August 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-ETCD" 7 "September 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP -\fIetcd\fP - enable SkyDNS service discovery from etcd. +\fIetcd\fP - enables SkyDNS service discovery from etcd. .SH "DESCRIPTION" .PP @@ -12,7 +12,7 @@ a generic DNS zone data plugin. Only a subset of DNS record types are implemente and delegations are not handled at all. 
.PP -The data in etcd instance has to be encoded as +The data in the etcd instance has to be encoded as a message \[la]https://github.com/skynetservices/skydns/blob/2fcff74cdc9f9a7dd64189a447ef27ac354b725f/msg/service.go#L26\[ra] like SkyDNS @@ -116,15 +116,19 @@ This is the default SkyDNS setup, with everything specified in full: .RS .nf -\&. { - etcd skydns.local { +skydns.local { + etcd { path /skydns endpoint http://localhost:2379 } prometheus - cache 160 skydns.local + cache loadbalance +} + +\&. { forward . 8.8.8.8:53 8.8.4.4:53 + cache } .fi @@ -138,12 +142,16 @@ when resolving external pointing CNAMEs. .RS .nf -\&. { - etcd skydns.local { +skydns.local { + etcd { path /skydns } - cache 160 skydns.local + cache +} + +\&. { forward . /etc/resolv.conf + cache } .fi diff --git a/man/coredns-file.7 b/man/coredns-file.7 index 193cb6d8bf1..bacd63c7c12 100644 --- a/man/coredns-file.7 +++ b/man/coredns-file.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-FILE" 7 "August 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-FILE" 7 "September 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-forward.7 b/man/coredns-forward.7 index 52bb9c4f869..8efabc22e9c 100644 --- a/man/coredns-forward.7 +++ b/man/coredns-forward.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-FORWARD" 7 "August 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-FORWARD" 7 "September 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP @@ -160,9 +160,8 @@ and we are randomly (this always uses the \fB\fCrandom\fR policy) spraying to an .PP -Where \fB\fCto\fR is one of the upstream servers (\fBTO\fP from the config), \fB\fCproto\fR is the protocol used by -the incoming query ("tcp" or "udp"), and family the transport family ("1" for IPv4, and "2" for -IPv6). +Where \fB\fCto\fR is one of the upstream servers (\fBTO\fP from the config), \fB\fCrcode\fR is the returned RCODE +from the upstream. 
.SH "EXAMPLES" .PP diff --git a/man/coredns-grpc.7 b/man/coredns-grpc.7 index 3068c0043d2..bcf3b469bb1 100644 --- a/man/coredns-grpc.7 +++ b/man/coredns-grpc.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-GRPC" 7 "August 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-GRPC" 7 "September 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-health.7 b/man/coredns-health.7 index 78d95720822..ecae0d7e2e8 100644 --- a/man/coredns-health.7 +++ b/man/coredns-health.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-HEALTH" 7 "August 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-HEALTH" 7 "September 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-hosts.7 b/man/coredns-hosts.7 index 0bb28943849..c9cf7e65712 100644 --- a/man/coredns-hosts.7 +++ b/man/coredns-hosts.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-HOSTS" 7 "August 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-HOSTS" 7 "September 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP @@ -93,6 +93,16 @@ is authoritative. If specific zones are listed (for example \fB\fCin-addr.arpa\f queries for those zones will be subject to fallthrough. +.SH "METRICS" +.PP +If monitoring is enabled (via the \fIprometheus\fP directive) then the following metrics are exported: + +.IP \(bu 4 +\fB\fCcoredns_hosts_entries_count{}\fR - The combined number of entries in hosts and Corefile. +.IP \(bu 4 +\fB\fCcoredns_hosts_reload_timestamp_seconds{}\fR - The timestamp of the last reload of hosts file. + + .SH "EXAMPLES" .PP Load \fB\fC/etc/hosts\fR file. @@ -146,11 +156,12 @@ Load hosts file inlined in Corefile. .RS .nf -\&. 
{ - hosts example.hosts example.org { +example.hosts example.org { + hosts { 10.0.0.1 example.org fallthrough } + whoami } .fi diff --git a/man/coredns-import.7 b/man/coredns-import.7 index 757e2a348ae..918e4a2cd35 100644 --- a/man/coredns-import.7 +++ b/man/coredns-import.7 @@ -1,13 +1,13 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-IMPORT" 7 "August 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-IMPORT" 7 "September 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP -\fIimport\fP - include files or reference snippets from a Corefile. +\fIimport\fP - includes files or references snippets from a Corefile. .SH "DESCRIPTION" .PP -The \fIimport\fP plugin can be used to include files into the main configuration. Another use it to +The \fIimport\fP plugin can be used to include files into the main configuration. Another use is to reference predefined snippets. Both can help to avoid some duplication. .PP @@ -45,7 +45,7 @@ label surrounded by parentheses: .nf (mysnippet) { - ... + ... } .fi diff --git a/man/coredns-k8s_external.7 b/man/coredns-k8s_external.7 index 4293fff1cba..1cabc3e7f3d 100644 --- a/man/coredns-k8s_external.7 +++ b/man/coredns-k8s_external.7 @@ -1,9 +1,9 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-K8S_EXTERNAL" 7 "August 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-K8S_EXTERNAL" 7 "September 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP -\fIk8s_external\fP - resolve load balancer and external IPs from outside kubernetes clusters. +\fIk8s_external\fP - resolves load balancer and external IPs from outside Kubernetes clusters. .SH "DESCRIPTION" .PP @@ -22,14 +22,14 @@ By default the apex of the zone will look like the following (assuming the zone .RS .nf -example.org. 5 IN SOA ns1.dns.example.org. hostmaster.example.org. ( - 12345 ; serial - 14400 ; refresh (4 hours) - 3600 ; retry (1 hour) - 604800 ; expire (1 week) - 5 ; minimum (4 hours) - ) -example.org 5 IN NS ns1.dns.example.org. 
+example.org. 5 IN SOA ns1.dns.example.org. hostmaster.example.org. ( + 12345 ; serial + 14400 ; refresh (4 hours) + 3600 ; retry (1 hour) + 604800 ; expire (1 week) + 5 ; minimum (4 hours) + ) +example.org 5 IN NS ns1.dns.example.org. ns1.dns.example.org. 5 IN A .... ns1.dns.example.org. 5 IN AAAA .... diff --git a/man/coredns-kubernetes.7 b/man/coredns-kubernetes.7 index ddc7cb021b1..2c7baf8c3e9 100644 --- a/man/coredns-kubernetes.7 +++ b/man/coredns-kubernetes.7 @@ -1,9 +1,9 @@ -.\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-KUBERNETES" 7 "August 2019" "CoreDNS" "CoreDNS Plugins" +.\" Generated by Mmark Markdown Processer - mmark.miek.nl +.TH "COREDNS-KUBERNETES" 7 "September 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP -\fIkubernetes\fP - enables the reading zone data from a Kubernetes cluster. +\fIkubernetes\fP - enables reading zone data from a Kubernetes cluster. .SH "DESCRIPTION" .PP @@ -302,8 +302,8 @@ For example, wildcards can be used to resolve all Endpoints for a Service as \fB .RS .nf -*.service.default.svc.cluster.local. 5 IN A 192.168.10.10 -*.service.default.svc.cluster.local. 5 IN A 192.168.25.15 +*.service.default.svc.cluster.local. 5 IN A 192.168.10.10 +*.service.default.svc.cluster.local. 5 IN A 192.168.25.15 .fi .RE diff --git a/man/coredns-loadbalance.7 b/man/coredns-loadbalance.7 index 0b5109a9a05..230421f76c6 100644 --- a/man/coredns-loadbalance.7 +++ b/man/coredns-loadbalance.7 @@ -1,18 +1,18 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-LOADBALANCE" 7 "August 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-LOADBALANCE" 7 "September 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP -\fIloadbalance\fP - randomize the order of A, AAAA and MX records. +\fIloadbalance\fP - randomizes the order of A, AAAA and MX records. 
.SH "DESCRIPTION" .PP -The \fIloadbalance\fP will act as a round-robin DNS loadbalancer by randomizing the order of A, AAAA, +The \fIloadbalance\fP will act as a round-robin DNS load balancer by randomizing the order of A, AAAA, and MX records in the answer. .PP See Wikipedia -\[la]https://en.wikipedia.org/wiki/Round-robin_DNS\[ra] about the pros and cons on this +\[la]https://en.wikipedia.org/wiki/Round-robin_DNS\[ra] about the pros and cons of this setup. It will take care to sort any CNAMEs before any address records, because some stub resolver implementations (like glibc) are particular about that. @@ -27,7 +27,7 @@ loadbalance [POLICY] .RE .IP \(bu 4 -\fBPOLICY\fP is how to balance, the default, and only option, is "round_robin". +\fBPOLICY\fP is how to balance. The default, and only option, is "round_robin". .SH "EXAMPLES" diff --git a/man/coredns-log.7 b/man/coredns-log.7 index c8e48de19f3..d9d7f30fb7c 100644 --- a/man/coredns-log.7 +++ b/man/coredns-log.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-LOG" 7 "August 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-LOG" 7 "September 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP @@ -155,7 +155,7 @@ Each of these logs will be outputted with \fB\fClog.Infof\fR, so a typical examp .RS .nf -2018\-10\-30T19:10:07.547Z [INFO] [::1]:50759 \- 29008 "A IN example.org. udp 41 false 4096" NOERROR qr,rd,ra,ad 68 0.037990251s +[INFO] [::1]:50759 \- 29008 "A IN example.org. udp 41 false 4096" NOERROR qr,rd,ra,ad 68 0.037990251s ~~~~ ## Examples diff --git a/man/coredns-loop.7 b/man/coredns-loop.7 index 27ba8901ecb..636d46ed623 100644 --- a/man/coredns-loop.7 +++ b/man/coredns-loop.7 @@ -1,9 +1,9 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-LOOP" 7 "August 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-LOOP" 7 "September 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP -\fIloop\fP - detect simple forwarding loops and halt the server. 
+\fIloop\fP - detects simple forwarding loops and halts the server. .SH "DESCRIPTION" .PP diff --git a/man/coredns-metadata.7 b/man/coredns-metadata.7 index e3598282e04..63af5a06609 100644 --- a/man/coredns-metadata.7 +++ b/man/coredns-metadata.7 @@ -1,31 +1,31 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-METADATA" 7 "August 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-METADATA" 7 "September 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP -\fImetadata\fP - enable a meta data collector. +\fImetadata\fP - enables a metadata collector. .SH "DESCRIPTION" .PP By enabling \fImetadata\fP any plugin that implements metadata.Provider interface \[la]https://godoc.org/github.com/coredns/coredns/plugin/metadata#Provider\[ra] will be called for -each DNS query, at beginning of the process for that query, in order to add its own meta data to +each DNS query, at the beginning of the process for that query, in order to add its own metadata to context. .PP -The meta data collected will be available for all plugins, via the Context parameter provided in the +The metadata collected will be available for all plugins, via the Context parameter provided in the ServeDNS function. The package (code) documentation has examples on how to inspect and retrieve metadata a plugin might be interested in. .PP -The meta data is added by setting a label with a value in the context. These labels should be named +The metadata is added by setting a label with a value in the context. These labels should be named \fB\fCplugin/NAME\fR, where \fBNAME\fP is something descriptive. The only hard requirement the \fImetadata\fP -plugin enforces is that the labels contains a slash. See the documentation for +plugin enforces is that the labels contain a slash. See the documentation for \fB\fCmetadata.SetValueFunc\fR. .PP -The value stored is a string. The empty string signals "no meta data". See the documentation for +The value stored is a string. 
The empty string signals "no metadata". See the documentation for \fB\fCmetadata.ValueFunc\fR on how to retrieve this. .SH "SYNTAX" diff --git a/man/coredns-metrics.7 b/man/coredns-metrics.7 index 8e98634233c..6fd2f62157b 100644 --- a/man/coredns-metrics.7 +++ b/man/coredns-metrics.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-METRICS" 7 "August 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-METRICS" 7 "September 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-nsid.7 b/man/coredns-nsid.7 index df678afb637..765c5b7ed76 100644 --- a/man/coredns-nsid.7 +++ b/man/coredns-nsid.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-NSID" 7 "August 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-NSID" 7 "September 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP @@ -39,7 +39,7 @@ Enable nsid: .RS .nf -\&. { +example.org { whoami nsid Use The Force } @@ -66,7 +66,7 @@ And now a client with NSID support will see an OPT record with the NSID option: ; EDNS: version: 0, flags:; udp: 4096 ; NSID: 55 73 65 20 54 68 65 20 46 6f 72 63 65 ("Use The Force") ;; QUESTION SECTION: -;whoami.example.org. IN A +;whoami.example.org. 
IN A .fi .RE diff --git a/man/coredns-pprof.7 b/man/coredns-pprof.7 index 21556dead49..8a974144591 100644 --- a/man/coredns-pprof.7 +++ b/man/coredns-pprof.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-PPROF" 7 "August 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-PPROF" 7 "September 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-ready.7 b/man/coredns-ready.7 index cc0c370d576..f97a10323bd 100644 --- a/man/coredns-ready.7 +++ b/man/coredns-ready.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-READY" 7 "August 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-READY" 7 "September 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-reload.7 b/man/coredns-reload.7 index f78d55bf6e0..2ff687df91a 100644 --- a/man/coredns-reload.7 +++ b/man/coredns-reload.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-RELOAD" 7 "August 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-RELOAD" 7 "September 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP @@ -48,17 +48,15 @@ reload [INTERVAL] [JITTER] .fi .RE -.IP \(bu 4 -The plugin will check for changes every \fBINTERVAL\fP, subject to +/- the \fBJITTER\fP duration +.PP +The plugin will check for changes every \fBINTERVAL\fP, subject to +/- the \fBJITTER\fP duration. + .IP \(bu 4 \fBINTERVAL\fP and \fBJITTER\fP are Golang (durations)[https://golang.org/pkg/time/#ParseDuration -\[la]https://golang.org/pkg/time/#ParseDuration\[ra]] -.IP \(bu 4 -Default \fBINTERVAL\fP is 30s, default \fBJITTER\fP is 15s -.IP \(bu 4 -Minimal value for \fBINTERVAL\fP is 2s, and for \fBJITTER\fP is 1s -.IP \(bu 4 -If \fBJITTER\fP is more than half of \fBINTERVAL\fP, it will be set to half of \fBINTERVAL\fP +\[la]https://golang.org/pkg/time/#ParseDuration\[ra]]. 
+The default \fBINTERVAL\fP is 30s, default \fBJITTER\fP is 15s, the minimal value for \fBINTERVAL\fP +is 2s, and for \fBJITTER\fP it is 1s. If \fBJITTER\fP is more than half of \fBINTERVAL\fP, it will be +set to half of \fBINTERVAL\fP .SH "EXAMPLES" @@ -102,8 +100,8 @@ where the reload fails, and you loose functionality. Consider the following Core .nf \&. { - health :8080 - whoami + health :8080 + whoami } .fi diff --git a/man/coredns-rewrite.7 b/man/coredns-rewrite.7 index 4cd0022b083..898f7509da4 100644 --- a/man/coredns-rewrite.7 +++ b/man/coredns-rewrite.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-REWRITE" 7 "August 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-REWRITE" 7 "September 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-route53.7 b/man/coredns-route53.7 index e28a92f42ed..8aa1badcda5 100644 --- a/man/coredns-route53.7 +++ b/man/coredns-route53.7 @@ -1,5 +1,5 @@ -.\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS-ROUTE53" 7 "August 2019" "CoreDNS" "CoreDNS Plugins" +.\" Generated by Mmark Markdown Processer - mmark.miek.nl +.TH "COREDNS-ROUTE53" 7 "September 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP @@ -73,8 +73,11 @@ Enable route53 with implicit AWS credentials and resolve CNAMEs via 10.0.0.1: .RS .nf +example.org { + route53 example.org.:Z1Z2Z3Z4DZ5Z6Z7 +} + \&. { - route53 example.org.:Z1Z2Z3Z4DZ5Z6Z7 forward . 10.0.0.1 } @@ -88,7 +91,7 @@ Enable route53 with explicit AWS credentials: .RS .nf -\&. { +example.org { route53 example.org.:Z1Z2Z3Z4DZ5Z6Z7 { aws\_access\_key AWS\_ACCESS\_KEY\_ID AWS\_SECRET\_ACCESS\_KEY } @@ -120,7 +123,7 @@ Enable route53 with multiple hosted zones with the same domain: .RS .nf -\&. { +example.org { route53 example.org.:Z1Z2Z3Z4DZ5Z6Z7 example.org.:Z93A52145678156 } @@ -134,7 +137,7 @@ Enable route53 and refresh records every 3 minutes .RS .nf -\&. 
{ +example.org { route53 example.org.:Z1Z2Z3Z4DZ5Z6Z7 { refresh 3m } diff --git a/man/coredns-secondary.7 b/man/coredns-secondary.7 index 45d5e257cb6..9ca520eaaf6 100644 --- a/man/coredns-secondary.7 +++ b/man/coredns-secondary.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-SECONDARY" 7 "August 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-SECONDARY" 7 "September 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-sign.7 b/man/coredns-sign.7 index 773e930ab5b..06cdf68a717 100644 --- a/man/coredns-sign.7 +++ b/man/coredns-sign.7 @@ -1,9 +1,9 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-SIGN" 7 "August 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-SIGN" 7 "September 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP -\fIsign\fP - add DNSSEC records to zone files. +\fIsign\fP - adds DNSSEC records to zone files. .SH "DESCRIPTION" .PP diff --git a/man/coredns-template.7 b/man/coredns-template.7 index 54aa9fda63d..948f11f449e 100644 --- a/man/coredns-template.7 +++ b/man/coredns-template.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-TEMPLATE" 7 "August 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-TEMPLATE" 7 "September 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-tls.7 b/man/coredns-tls.7 index db0022d2058..016c09c2d7c 100644 --- a/man/coredns-tls.7 +++ b/man/coredns-tls.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-TLS" 7 "August 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-TLS" 7 "September 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP @@ -60,8 +60,8 @@ nameservers defined in \fB\fC/etc/resolv.conf\fR to resolve the query. This prox .nf tls://.:5553 { - tls cert.pem key.pem ca.pem - forward . /etc/resolv.conf + tls cert.pem key.pem ca.pem + forward . /etc/resolv.conf } .fi @@ -76,8 +76,8 @@ incoming queries. .nf grpc://. 
{ - tls cert.pem key.pem ca.pem - forward . /etc/resolv.conf + tls cert.pem key.pem ca.pem + forward . /etc/resolv.conf } .fi diff --git a/man/coredns-whoami.7 b/man/coredns-whoami.7 index 906527920fc..f71cd891fad 100644 --- a/man/coredns-whoami.7 +++ b/man/coredns-whoami.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-WHOAMI" 7 "August 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-WHOAMI" 7 "September 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP @@ -50,7 +50,7 @@ Start a server on the default port and load the \fIwhoami\fP plugin. .RS .nf -\&. { +example.org { whoami } From 22dd0a6f4500037d624e0741002394f10e3dae23 Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Fri, 27 Sep 2019 13:47:57 +0100 Subject: [PATCH 210/324] Add 1.6.4 release notes (#3315) Automatically submitted. --- notes/coredns-1.6.4.md | 41 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) create mode 100644 notes/coredns-1.6.4.md diff --git a/notes/coredns-1.6.4.md b/notes/coredns-1.6.4.md new file mode 100644 index 00000000000..3a8a4bc4d1e --- /dev/null +++ b/notes/coredns-1.6.4.md @@ -0,0 +1,41 @@ ++++ +title = "CoreDNS-1.6.4 Release" +description = "CoreDNS-1.6.4 Release Notes." +tags = ["Release", "1.6.4", "Notes"] +release = "1.6.4" +date = 2019-09-27T10:00:00+00:00 +author = "coredns" ++++ + +The CoreDNS team has released +[CoreDNS-1.6.4](https://github.com/coredns/coredns/releases/tag/v1.6.4). + +Various code cleanups and documentation improvements. We've added one new plugin: *acl*, that allows +blocking requests. + +# Plugins + +* [*acl*](/plugins/acl) block request from IPs or IP ranges. +* [*kubernetes*](/plugins/kubernetes) received some bug fixes, see below for specific PRs. +* [*hosts*](/plugins/hosts) exports metrics on the number of entries and last reload time. 
+ +## Brought to You By + +An Xiao, +Chris O'Haver, +Cricket Liu, +Guangming Wang, +Kasisnu, +li mengyang, +Miek Gieben, +orangelynx, +xieyanker, +yeya24, +Yong Tang. + +## Noteworthy Changes + +* plugin/hosts: add host metrics (https://github.com/coredns/coredns/pull/3277) +* plugin/kubernetes: Don't duplicate service record for every port (https://github.com/coredns/coredns/pull/3240) +* plugin/kubernetes: Handle multiple local IPs and bind (https://github.com/coredns/coredns/pull/3208) +* Add plugin ACL for source IP filtering (https://github.com/coredns/coredns/pull/3103) From b139ba34f370a4937bf76e7cc259a26f1394a91d Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Fri, 27 Sep 2019 14:36:14 +0100 Subject: [PATCH 211/324] Set version to 1.6.4 (#3317) Signed-off-by: Miek Gieben --- coremain/version.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/coremain/version.go b/coremain/version.go index ae44275ba2d..1e7766cd093 100644 --- a/coremain/version.go +++ b/coremain/version.go @@ -2,7 +2,7 @@ package coremain // Various CoreDNS constants. const ( - CoreVersion = "1.6.3" + CoreVersion = "1.6.4" coreName = "CoreDNS" serverType = "dns" ) From 4ff5635a6e5f5ab02498eea97323fafe4f3eccf6 Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Fri, 27 Sep 2019 19:49:14 +0100 Subject: [PATCH 212/324] Run go generate and update deps (#3319) Go gen updated the deps this is why 1.6.4 has that suffix. Do a go gen and a go mod tidy. 
Fixes: #3318 Signed-off-by: Miek Gieben --- go.mod | 1 + go.sum | 6 ++---- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index ee47cc0bad5..f748bdc2bcc 100644 --- a/go.mod +++ b/go.mod @@ -8,6 +8,7 @@ require ( github.com/Azure/go-autorest/autorest v0.9.1 github.com/Azure/go-autorest/autorest/azure/auth v0.3.0 github.com/Azure/go-autorest/autorest/to v0.2.0 // indirect + github.com/DataDog/datadog-go v2.2.0+incompatible // indirect github.com/Shopify/sarama v1.21.0 // indirect github.com/apache/thrift v0.12.0 // indirect github.com/aws/aws-sdk-go v1.24.3 diff --git a/go.sum b/go.sum index 9f4a9740bfe..2052a2b7fcd 100644 --- a/go.sum +++ b/go.sum @@ -33,6 +33,8 @@ github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VY github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/DataDog/datadog-go v2.2.0+incompatible h1:V5BKkxACZLjzHjSgBbr2gvLA2Ae49yhc6CSY7MLy5k4= +github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/DataDog/zstd v1.3.5 h1:DtpNbljikUepEPD16hD4LvIcmhnhdLTiW/5pHgbmp14= github.com/DataDog/zstd v1.3.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= github.com/Shopify/sarama v1.21.0 h1:0GKs+e8mn1RRUzfg9oUXv3v7ZieQLmOZF/bfnmmGhM8= @@ -45,8 +47,6 @@ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRF github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/apache/thrift v0.12.0 h1:pODnxUFNcjP9UTLZGTdeh+j16A8lJbRvD3rOtrk/7bs= 
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= -github.com/aws/aws-sdk-go v1.23.21 h1:eVJT2C99cAjZlBY8+CJovf6AwrSANzAcYNuxdCB+SPk= -github.com/aws/aws-sdk-go v1.23.21/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.24.3 h1:113A33abx/cqv0ga94D8z9elza1YEm749ltJI67Uhq4= github.com/aws/aws-sdk-go v1.24.3/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= @@ -438,8 +438,6 @@ google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiq google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0 h1:AzbTB6ux+okLTzP8Ru1Xs41C303zdcfEht7MQnYJt5A= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -gopkg.in/DataDog/dd-trace-go.v1 v1.17.0 h1:j9vAp9Re9bbtA/QFehkJpNba/6W2IbJtNuXZophCa54= -gopkg.in/DataDog/dd-trace-go.v1 v1.17.0/go.mod h1:DVp8HmDh8PuTu2Z0fVVlBsyWaC++fzwVCaGWylTe3tg= gopkg.in/DataDog/dd-trace-go.v1 v1.18.0 h1:n0pmVs3TxCRC9Jrd1/aNaB8roW5ZHMw0UOWPxAoUkkI= gopkg.in/DataDog/dd-trace-go.v1 v1.18.0/go.mod h1:DVp8HmDh8PuTu2Z0fVVlBsyWaC++fzwVCaGWylTe3tg= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= From ba5d4a637271b302830e38af0092d798bd9ad668 Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Sat, 28 Sep 2019 10:40:43 +0100 Subject: [PATCH 213/324] plugin register (#3321) These plugins where missed in #3287 because their setup is done in a file other than setup.go Signed-off-by: Miek Gieben --- plugin/bind/bind.go | 9 ++------- plugin/cancel/cancel.go | 7 +------ plugin/debug/debug.go | 7 +------ plugin/root/root.go | 7 +------ plugin/tls/tls.go | 7 +------ 5 files changed, 6 insertions(+), 31 deletions(-) diff --git a/plugin/bind/bind.go b/plugin/bind/bind.go index 
749561a3616..cfbd36597a0 100644 --- a/plugin/bind/bind.go +++ b/plugin/bind/bind.go @@ -1,11 +1,6 @@ // Package bind allows binding to a specific interface instead of bind to all of them. package bind -import "github.com/caddyserver/caddy" +import "github.com/coredns/coredns/plugin" -func init() { - caddy.RegisterPlugin("bind", caddy.Plugin{ - ServerType: "dns", - Action: setup, - }) -} +func init() { plugin.Register("bind", setup) } diff --git a/plugin/cancel/cancel.go b/plugin/cancel/cancel.go index 10efabfea5b..98f4f236796 100644 --- a/plugin/cancel/cancel.go +++ b/plugin/cancel/cancel.go @@ -13,12 +13,7 @@ import ( "github.com/miekg/dns" ) -func init() { - caddy.RegisterPlugin("cancel", caddy.Plugin{ - ServerType: "dns", - Action: setup, - }) -} +func init() { plugin.Register("cancel", setup) } func setup(c *caddy.Controller) error { ca := Cancel{timeout: 5001 * time.Millisecond} diff --git a/plugin/debug/debug.go b/plugin/debug/debug.go index 225e5f42276..91cc6fcf0c3 100644 --- a/plugin/debug/debug.go +++ b/plugin/debug/debug.go @@ -7,12 +7,7 @@ import ( "github.com/caddyserver/caddy" ) -func init() { - caddy.RegisterPlugin("debug", caddy.Plugin{ - ServerType: "dns", - Action: setup, - }) -} +func init() { plugin.Register("debug", setup) } func setup(c *caddy.Controller) error { config := dnsserver.GetConfig(c) diff --git a/plugin/root/root.go b/plugin/root/root.go index 15cc45626c8..66177a6bd5e 100644 --- a/plugin/root/root.go +++ b/plugin/root/root.go @@ -12,12 +12,7 @@ import ( var log = clog.NewWithPlugin("root") -func init() { - caddy.RegisterPlugin("root", caddy.Plugin{ - ServerType: "dns", - Action: setup, - }) -} +func init() { plugin.Register("root", setup) } func setup(c *caddy.Controller) error { config := dnsserver.GetConfig(c) diff --git a/plugin/tls/tls.go b/plugin/tls/tls.go index 3231e938176..bfde8cb2a67 100644 --- a/plugin/tls/tls.go +++ b/plugin/tls/tls.go @@ -10,12 +10,7 @@ import ( 
 "github.com/caddyserver/caddy" ) -func init() { - caddy.RegisterPlugin("tls", caddy.Plugin{ - ServerType: "dns", - Action: setup, - }) -} +func init() { plugin.Register("tls", setup) } func setup(c *caddy.Controller) error { err := parseTLS(c) From 03a3695ea9af1ff704861141a6ca807d742a894f Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Sat, 28 Sep 2019 10:41:12 +0100 Subject: [PATCH 214/324] plugins: calling Dispenser itself is a mistake (#3323) Remove all these uses and just make them work on caddy.Controller. Also don't export parsing functions as they should be private to the plugin. Signed-off-by: Miek Gieben --- plugin/dnstap/setup.go | 5 ++--- plugin/dnstap/setup_test.go | 2 +- plugin/forward/setup.go | 8 +++----- plugin/grpc/setup.go | 7 +++---- 4 files changed, 9 insertions(+), 13 deletions(-) diff --git a/plugin/dnstap/setup.go b/plugin/dnstap/setup.go index 06f26428c21..11269b2912b 100644 --- a/plugin/dnstap/setup.go +++ b/plugin/dnstap/setup.go @@ -10,7 +10,6 @@ import ( "github.com/coredns/coredns/plugin/pkg/parse" "github.com/caddyserver/caddy" - "github.com/caddyserver/caddy/caddyfile" ) var log = clog.NewWithPlugin("dnstap") @@ -30,7 +29,7 @@ type config struct { full bool } -func parseConfig(d *caddyfile.Dispenser) (c config, err error) { +func parseConfig(d *caddy.Controller) (c config, err error) { d.Next() // directive name if !d.Args(&c.target) { @@ -56,7 +55,7 @@ func parseConfig(d *caddyfile.Dispenser) (c config, err error) { } func setup(c *caddy.Controller) error { - conf, err := parseConfig(&c.Dispenser) + conf, err := parseConfig(c) if err != nil { return err } diff --git a/plugin/dnstap/setup_test.go b/plugin/dnstap/setup_test.go index eef941d89c6..5ed8c3b6a09 100644 --- a/plugin/dnstap/setup_test.go +++ b/plugin/dnstap/setup_test.go @@ -21,7 +21,7 @@ func TestConfig(t *testing.T) { } for _, c := range tests { cad := caddy.NewTestController("dns", c.file) - conf, 
err := parseConfig(cad) if c.fail { if err == nil { t.Errorf("%s: %s", c.file, err) diff --git a/plugin/forward/setup.go b/plugin/forward/setup.go index 60ec08fa3ac..3bfee9f01ec 100644 --- a/plugin/forward/setup.go +++ b/plugin/forward/setup.go @@ -13,7 +13,6 @@ import ( "github.com/coredns/coredns/plugin/pkg/transport" "github.com/caddyserver/caddy" - "github.com/caddyserver/caddy/caddyfile" ) func init() { plugin.Register("forward", setup) } @@ -74,7 +73,7 @@ func parseForward(c *caddy.Controller) (*Forward, error) { return nil, plugin.ErrOnce } i++ - f, err = ParseForwardStanza(&c.Dispenser) + f, err = parseStanza(c) if err != nil { return nil, err } @@ -82,8 +81,7 @@ func parseForward(c *caddy.Controller) (*Forward, error) { return f, nil } -// ParseForwardStanza parses one forward stanza -func ParseForwardStanza(c *caddyfile.Dispenser) (*Forward, error) { +func parseStanza(c *caddy.Controller) (*Forward, error) { f := New() if !c.Args(&f.from) { @@ -128,7 +126,7 @@ func ParseForwardStanza(c *caddyfile.Dispenser) (*Forward, error) { return f, nil } -func parseBlock(c *caddyfile.Dispenser, f *Forward) error { +func parseBlock(c *caddy.Controller, f *Forward) error { switch c.Val() { case "except": ignore := c.RemainingArgs() diff --git a/plugin/grpc/setup.go b/plugin/grpc/setup.go index 5c0c9a4521b..a234efb37b6 100644 --- a/plugin/grpc/setup.go +++ b/plugin/grpc/setup.go @@ -11,7 +11,6 @@ import ( pkgtls "github.com/coredns/coredns/plugin/pkg/tls" "github.com/caddyserver/caddy" - "github.com/caddyserver/caddy/caddyfile" ) func init() { plugin.Register("grpc", setup) } @@ -50,7 +49,7 @@ func parseGRPC(c *caddy.Controller) (*GRPC, error) { return nil, plugin.ErrOnce } i++ - g, err = parseGRPCStanza(&c.Dispenser) + g, err = parseStanza(c) if err != nil { return nil, err } @@ -58,7 +57,7 @@ func parseGRPC(c *caddy.Controller) (*GRPC, error) { return g, nil } -func parseGRPCStanza(c *caddyfile.Dispenser) (*GRPC, error) 
{ +func parseStanza(c *caddy.Controller) (*GRPC, error) { g := newGRPC() if !c.Args(&g.from) { @@ -99,7 +98,7 @@ func parseGRPCStanza(c *caddyfile.Dispenser) (*GRPC, error) { return g, nil } -func parseBlock(c *caddyfile.Dispenser, g *GRPC) error { +func parseBlock(c *caddy.Controller, g *GRPC) error { switch c.Val() { case "except": From cd5aeecb43a6fddddd757e8e72fe04dfeb82b34a Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Mon, 30 Sep 2019 09:01:19 +0100 Subject: [PATCH 215/324] Makefiles: remove GO111MODULE (#3326) * Makefiles: remove GO111MODULE This is the default in 1.13; it can be removed. Also make the test log less, so failures are more pronounced in the travis output; hide coverage stats for instance. Signed-off-by: Miek Gieben * Kill -v as well Signed-off-by: Miek Gieben --- Makefile | 26 ++++++++++++-------------- 1 file changed, 12 insertions(+), 14 deletions(-) diff --git a/Makefile b/Makefile index 648b8f38bfc..e82ffc7e96d 100644 --- a/Makefile +++ b/Makefile @@ -14,7 +14,7 @@ all: coredns .PHONY: coredns coredns: $(CHECKS) - GO111MODULE=on CGO_ENABLED=$(CGO_ENABLED) $(SYSTEM) go build $(BUILDOPTS) -ldflags="-s -w -X github.com/coredns/coredns/coremain.GitCommit=$(GITCOMMIT)" -o $(BINARY) + CGO_ENABLED=$(CGO_ENABLED) $(SYSTEM) go build $(BUILDOPTS) -ldflags="-s -w -X github.com/coredns/coredns/coremain.GitCommit=$(GITCOMMIT)" -o $(BINARY) .PHONY: check check: presubmit core/plugin/zplugin.go core/dnsserver/zdirectives.go @@ -22,25 +22,23 @@ check: presubmit core/plugin/zplugin.go core/dnsserver/zdirectives.go .PHONY: travis travis: ifeq ($(TEST_TYPE),core) - ( cd request ; GO111MODULE=on go test -v -race ./... ) - ( cd core ; GO111MODULE=on go test -v -race ./... ) - ( cd coremain ; GO111MODULE=on go test -v -race ./... ) + ( cd request; go test -race ./... ) + ( cd core; go test -race ./... ) + ( cd coremain; go test -race ./... ) endif ifeq ($(TEST_TYPE),integration) - ( cd test ; GO111MODULE=on go test -v -race ./... 
) + ( cd test; go test -race ./... ) endif ifeq ($(TEST_TYPE),plugin) - ( cd plugin ; GO111MODULE=on go test -v -race ./... ) + ( cd plugin; go test -race ./... ) endif ifeq ($(TEST_TYPE),coverage) for d in `go list ./... | grep -v vendor`; do \ t=$$(date +%s); \ - GO111MODULE=on go test -i -coverprofile=cover.out -covermode=atomic $$d || exit 1; \ - GO111MODULE=on go test -v -coverprofile=cover.out -covermode=atomic $$d || exit 1; \ - echo "Coverage test $$d took $$(($$(date +%s)-t)) seconds"; \ + go test -i -coverprofile=cover.out -covermode=atomic $$d || exit 1; \ + go test -coverprofile=cover.out -covermode=atomic $$d || exit 1; \ if [ -f cover.out ]; then \ - cat cover.out >> coverage.txt; \ - rm cover.out; \ + cat cover.out >> coverage.txt && rm cover.out; \ fi; \ done endif @@ -61,11 +59,11 @@ ifeq ($(TEST_TYPE),fuzzit) endif core/plugin/zplugin.go core/dnsserver/zdirectives.go: plugin.cfg - GO111MODULE=on go generate coredns.go + go generate coredns.go .PHONY: gen gen: - GO111MODULE=on go generate coredns.go + go generate coredns.go .PHONY: pb pb: @@ -78,5 +76,5 @@ presubmit: .PHONY: clean clean: - GO111MODULE=on go clean + go clean rm -f coredns From 9ae5a1a85611a70e9edb796cc33c282a04dfdbe5 Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 30 Sep 2019 09:16:13 -0400 Subject: [PATCH 216/324] build(deps): bump google.golang.org/grpc from 1.23.0 to 1.24.0 (#3332) Bumps [google.golang.org/grpc](https://github.com/grpc/grpc-go) from 1.23.0 to 1.24.0. 
- [Release notes](https://github.com/grpc/grpc-go/releases) - [Commits](https://github.com/grpc/grpc-go/compare/v1.23.0...v1.24.0) Signed-off-by: dependabot-preview[bot] --- go.mod | 2 +- go.sum | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index f748bdc2bcc..262d04a6ea2 100644 --- a/go.mod +++ b/go.mod @@ -44,7 +44,7 @@ require ( golang.org/x/sys v0.0.0-20190904154756-749cb33beabd google.golang.org/api v0.10.0 google.golang.org/genproto v0.0.0-20190701230453-710ae3a149df // indirect - google.golang.org/grpc v1.23.0 + google.golang.org/grpc v1.24.0 gopkg.in/DataDog/dd-trace-go.v1 v1.18.0 gopkg.in/inf.v0 v0.9.1 // indirect k8s.io/api v0.0.0-20190620084959-7cf5895f2711 diff --git a/go.sum b/go.sum index 2052a2b7fcd..025027398a3 100644 --- a/go.sum +++ b/go.sum @@ -438,6 +438,8 @@ google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiq google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0 h1:AzbTB6ux+okLTzP8Ru1Xs41C303zdcfEht7MQnYJt5A= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.24.0 h1:vb/1TCsVn3DcJlQ0Gs1yB1pKI6Do2/QNwxdKqmc/b0s= +google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= gopkg.in/DataDog/dd-trace-go.v1 v1.18.0 h1:n0pmVs3TxCRC9Jrd1/aNaB8roW5ZHMw0UOWPxAoUkkI= gopkg.in/DataDog/dd-trace-go.v1 v1.18.0/go.mod h1:DVp8HmDh8PuTu2Z0fVVlBsyWaC++fzwVCaGWylTe3tg= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= From 55a01dadaacc33674aff1e936b101695f9de32cf Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 30 Sep 2019 08:29:59 -0700 Subject: [PATCH 217/324] build(deps): bump github.com/aws/aws-sdk-go from 1.24.3 to 1.25.1 (#3330) Bumps 
[github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.24.3 to 1.25.1. - [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Changelog](https://github.com/aws/aws-sdk-go/blob/master/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.24.3...v1.25.1) Signed-off-by: dependabot-preview[bot] --- go.mod | 2 +- go.sum | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index 262d04a6ea2..260153c6ebe 100644 --- a/go.mod +++ b/go.mod @@ -11,7 +11,7 @@ require ( github.com/DataDog/datadog-go v2.2.0+incompatible // indirect github.com/Shopify/sarama v1.21.0 // indirect github.com/apache/thrift v0.12.0 // indirect - github.com/aws/aws-sdk-go v1.24.3 + github.com/aws/aws-sdk-go v1.25.1 github.com/caddyserver/caddy v1.0.3 github.com/coredns/federation v0.0.0-20190818181423-e032b096babe github.com/coreos/go-systemd v0.0.0-20190212144455-93d5ec2c7f76 // indirect diff --git a/go.sum b/go.sum index 025027398a3..c70341d8f49 100644 --- a/go.sum +++ b/go.sum @@ -49,6 +49,8 @@ github.com/apache/thrift v0.12.0 h1:pODnxUFNcjP9UTLZGTdeh+j16A8lJbRvD3rOtrk/7bs= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/aws/aws-sdk-go v1.24.3 h1:113A33abx/cqv0ga94D8z9elza1YEm749ltJI67Uhq4= github.com/aws/aws-sdk-go v1.24.3/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.25.1 h1:d7zDXFT2Tgq/yw7Wku49+lKisE8Xc85erb+8PlE/Shk= +github.com/aws/aws-sdk-go v1.25.1/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 
h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= From dbd1c047cb973fc669fbe4e97dc12e7b3d3e6ed8 Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Tue, 1 Oct 2019 07:41:29 +0100 Subject: [PATCH 218/324] Run gostaticcheck (#3325) * Run gostaticcheck Run gostaticcheck on the codebase and fix almost all flagged items. Only keep * coremain/run.go:192:2: var appVersion is unused (U1000) * plugin/chaos/setup.go:54:3: the surrounding loop is unconditionally terminated (SA4004) * plugin/etcd/setup.go:103:3: the surrounding loop is unconditionally terminated (SA4004) * plugin/pkg/replacer/replacer.go:274:13: argument should be pointer-like to avoid allocations (SA6002) * plugin/route53/setup.go:124:28: session.New is deprecated: Use NewSession functions to create sessions instead. NewSession has the same functionality as New except an error can be returned when the func is called instead of waiting to receive an error until a request is made. (SA1019) * test/grpc_test.go:25:69: grpc.WithTimeout is deprecated: use DialContext and context.WithTimeout instead. Will be supported throughout 1.x. (SA1019) The first one isn't true, as this is set via ldflags. The rest is minor. The deprecation should be fixed at some point; I'll file some issues. Signed-off-by: Miek Gieben * Make sure to plug in the plugins Import the plugins; the file that did this was removed, put it in the reload test as this requires an almost complete coredns server. 
Signed-off-by: Miek Gieben --- coremain/run.go | 3 +- plugin/acl/acl.go | 3 -- plugin/auto/walk.go | 2 +- plugin/cancel/cancel.go | 4 +- plugin/dnssec/dnssec_test.go | 14 ------- plugin/dnstap/handler.go | 6 +-- plugin/dnstap/setup.go | 3 -- plugin/etcd/setup.go | 3 -- plugin/forward/proxy.go | 5 +-- plugin/rewrite/name.go | 2 +- plugin/rewrite/rewrite.go | 1 - plugin/rewrite/setup.go | 3 -- plugin/rewrite/ttl.go | 2 +- plugin/template/template.go | 6 +-- plugin/trace/trace_test.go | 2 - test/doc.go | 2 +- test/external_test.go | 72 ------------------------------------ test/plugin_test.go | 43 --------------------- test/server.go | 3 +- 19 files changed, 14 insertions(+), 165 deletions(-) delete mode 100644 test/external_test.go delete mode 100644 test/plugin_test.go diff --git a/coremain/run.go b/coremain/run.go index b62878c3a03..5dcb3c013bd 100644 --- a/coremain/run.go +++ b/coremain/run.go @@ -173,8 +173,7 @@ func setVersion() { // Only set the appVersion if -ldflags was used if gitNearestTag != "" || gitTag != "" { if devBuild && gitNearestTag != "" { - appVersion = fmt.Sprintf("%s (+%s %s)", - strings.TrimPrefix(gitNearestTag, "v"), GitCommit, buildDate) + appVersion = fmt.Sprintf("%s (+%s %s)", strings.TrimPrefix(gitNearestTag, "v"), GitCommit, buildDate) } else if gitTag != "" { appVersion = strings.TrimPrefix(gitTag, "v") } diff --git a/plugin/acl/acl.go b/plugin/acl/acl.go index b25138a309e..2eea0a27c55 100644 --- a/plugin/acl/acl.go +++ b/plugin/acl/acl.go @@ -6,15 +6,12 @@ import ( "github.com/coredns/coredns/plugin" "github.com/coredns/coredns/plugin/metrics" - clog "github.com/coredns/coredns/plugin/pkg/log" "github.com/coredns/coredns/request" "github.com/infobloxopen/go-trees/iptree" "github.com/miekg/dns" ) -var log = clog.NewWithPlugin("acl") - // ACL enforces access control policies on DNS queries. 
type ACL struct { Next plugin.Handler diff --git a/plugin/auto/walk.go b/plugin/auto/walk.go index d4002a46d1c..40c62e51445 100644 --- a/plugin/auto/walk.go +++ b/plugin/auto/walk.go @@ -20,7 +20,7 @@ func (a Auto) Walk() error { toDelete[n] = true } - filepath.Walk(a.loader.directory, func(path string, info os.FileInfo, err error) error { + filepath.Walk(a.loader.directory, func(path string, info os.FileInfo, _ error) error { if info == nil || info.IsDir() { return nil } diff --git a/plugin/cancel/cancel.go b/plugin/cancel/cancel.go index 98f4f236796..9ded73e82df 100644 --- a/plugin/cancel/cancel.go +++ b/plugin/cancel/cancel.go @@ -16,13 +16,13 @@ import ( func init() { plugin.Register("cancel", setup) } func setup(c *caddy.Controller) error { - ca := Cancel{timeout: 5001 * time.Millisecond} + ca := Cancel{} for c.Next() { args := c.RemainingArgs() switch len(args) { case 0: - break + ca.timeout = 5001 * time.Millisecond case 1: dur, err := time.ParseDuration(args[0]) if err != nil { diff --git a/plugin/dnssec/dnssec_test.go b/plugin/dnssec/dnssec_test.go index e60b5ee7ecc..fb8a128def0 100644 --- a/plugin/dnssec/dnssec_test.go +++ b/plugin/dnssec/dnssec_test.go @@ -153,20 +153,6 @@ func testMsgCname() *dns.Msg { } } -func testDelegationMsg() *dns.Msg { - return &dns.Msg{ - Ns: []dns.RR{ - test.NS("miek.nl. 3600 IN NS linode.atoom.net."), - test.NS("miek.nl. 3600 IN NS ns-ext.nlnetlabs.nl."), - test.NS("miek.nl. 3600 IN NS omval.tednet.nl."), - }, - Extra: []dns.RR{ - test.A("omval.tednet.nl. 3600 IN A 185.49.141.42"), - test.AAAA("omval.tednet.nl. 
3600 IN AAAA 2a04:b900:0:100::42"), - }, - } -} - func testMsgDname() *dns.Msg { return &dns.Msg{ Answer: []dns.RR{ diff --git a/plugin/dnstap/handler.go b/plugin/dnstap/handler.go index 1178dad79fc..0dde3a34689 100644 --- a/plugin/dnstap/handler.go +++ b/plugin/dnstap/handler.go @@ -30,13 +30,9 @@ type ( TapMessage(message *tap.Message) Pack() bool } - tapContext struct { - context.Context - Dnstap - } ) -// ContextKey defines the type of key that is used to save data into the context +// ContextKey defines the type of key that is used to save data into the context. type ContextKey string const ( diff --git a/plugin/dnstap/setup.go b/plugin/dnstap/setup.go index 11269b2912b..ee481fe1171 100644 --- a/plugin/dnstap/setup.go +++ b/plugin/dnstap/setup.go @@ -6,14 +6,11 @@ import ( "github.com/coredns/coredns/core/dnsserver" "github.com/coredns/coredns/plugin" "github.com/coredns/coredns/plugin/dnstap/dnstapio" - clog "github.com/coredns/coredns/plugin/pkg/log" "github.com/coredns/coredns/plugin/pkg/parse" "github.com/caddyserver/caddy" ) -var log = clog.NewWithPlugin("dnstap") - func init() { plugin.Register("dnstap", wrapSetup) } func wrapSetup(c *caddy.Controller) error { diff --git a/plugin/etcd/setup.go b/plugin/etcd/setup.go index c2916de517f..a95549f3255 100644 --- a/plugin/etcd/setup.go +++ b/plugin/etcd/setup.go @@ -5,7 +5,6 @@ import ( "github.com/coredns/coredns/core/dnsserver" "github.com/coredns/coredns/plugin" - clog "github.com/coredns/coredns/plugin/pkg/log" mwtls "github.com/coredns/coredns/plugin/pkg/tls" "github.com/coredns/coredns/plugin/pkg/upstream" @@ -13,8 +12,6 @@ import ( etcdcv3 "go.etcd.io/etcd/clientv3" ) -var log = clog.NewWithPlugin("etcd") - func init() { plugin.Register("etcd", setup) } func setup(c *caddy.Controller) error { diff --git a/plugin/forward/proxy.go b/plugin/forward/proxy.go index 78b2cf3b827..6485d8d726e 100644 --- 
a/plugin/forward/proxy.go +++ b/plugin/forward/proxy.go @@ -12,11 +12,8 @@ import ( // Proxy defines an upstream host. type Proxy struct { fails uint32 + addr string - addr string - - // Connection caching - expire time.Duration transport *Transport // health checking diff --git a/plugin/rewrite/name.go b/plugin/rewrite/name.go index 9302d563a3b..7a8f9ad7ca3 100644 --- a/plugin/rewrite/name.go +++ b/plugin/rewrite/name.go @@ -195,7 +195,7 @@ func newNameRule(nextAction string, args ...string) (Rule, error) { }, }, nil default: - return nil, fmt.Errorf("A name rule supports only exact, prefix, suffix, substring, and regex name matching, received: %s", matchType) + return nil, fmt.Errorf("name rule supports only exact, prefix, suffix, substring, and regex name matching, received: %s", matchType) } } if len(args) == 7 { diff --git a/plugin/rewrite/rewrite.go b/plugin/rewrite/rewrite.go index 2eabf6df280..13e1d2092d5 100644 --- a/plugin/rewrite/rewrite.go +++ b/plugin/rewrite/rewrite.go @@ -61,7 +61,6 @@ func (rw Rewrite) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg return plugin.NextOrFailure(rw.Name(), rw.Next, ctx, wr, r) } case RewriteIgnored: - break } } if rw.noRevert || len(wr.ResponseRules) == 0 { diff --git a/plugin/rewrite/setup.go b/plugin/rewrite/setup.go index 6b6bcce6695..8c2890c6298 100644 --- a/plugin/rewrite/setup.go +++ b/plugin/rewrite/setup.go @@ -3,13 +3,10 @@ package rewrite import ( "github.com/coredns/coredns/core/dnsserver" "github.com/coredns/coredns/plugin" - clog "github.com/coredns/coredns/plugin/pkg/log" "github.com/caddyserver/caddy" ) -var log = clog.NewWithPlugin("rewrite") - func init() { plugin.Register("rewrite", setup) } func setup(c *caddy.Controller) error { diff --git a/plugin/rewrite/ttl.go b/plugin/rewrite/ttl.go index 73445dfc919..59ed9f52a1e 100644 --- a/plugin/rewrite/ttl.go +++ b/plugin/rewrite/ttl.go @@ -159,7 +159,7 @@ func newTTLRule(nextAction string, args ...string) 
(Rule, error) { }, }, nil default: - return nil, fmt.Errorf("A ttl rule supports only exact, prefix, suffix, substring, and regex name matching") + return nil, fmt.Errorf("ttl rule supports only exact, prefix, suffix, substring, and regex name matching") } } if len(args) > 3 { diff --git a/plugin/template/template.go b/plugin/template/template.go index eaa291f7f79..f2a7b186fe7 100644 --- a/plugin/template/template.go +++ b/plugin/template/template.go @@ -73,7 +73,7 @@ func (h Handler) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) } for _, template := range h.Templates { - data, match, fthrough := template.match(ctx, state, zone) + data, match, fthrough := template.match(ctx, state) if !match { if !fthrough { return dns.RcodeNameError, nil @@ -143,11 +143,11 @@ func executeRRTemplate(server, section string, template *gotmpl.Template, data * return rr, nil } -func (t template) match(ctx context.Context, state request.Request, zone string) (*templateData, bool, bool) { +func (t template) match(ctx context.Context, state request.Request) (*templateData, bool, bool) { q := state.Req.Question[0] data := &templateData{md: metadata.ValueFuncs(ctx)} - zone = plugin.Zones(t.zones).Matches(state.Name()) + zone := plugin.Zones(t.zones).Matches(state.Name()) if zone == "" { return data, false, true } diff --git a/plugin/trace/trace_test.go b/plugin/trace/trace_test.go index 48a4d7e78fa..7ea49a2554d 100644 --- a/plugin/trace/trace_test.go +++ b/plugin/trace/trace_test.go @@ -14,8 +14,6 @@ import ( "github.com/opentracing/opentracing-go/mocktracer" ) -const server = "coolServer" - func TestStartup(t *testing.T) { m, err := traceParse(caddy.NewTestController("dns", `trace`)) if err != nil { diff --git a/test/doc.go b/test/doc.go index ba09e877248..528092a857b 100644 --- a/test/doc.go +++ b/test/doc.go @@ -1,2 +1,2 @@ -// Package test contains function and types useful for writing tests +// Package test contains function and types useful for writing tests. 
package test diff --git a/test/external_test.go b/test/external_test.go deleted file mode 100644 index 8e6adab9123..00000000000 --- a/test/external_test.go +++ /dev/null @@ -1,72 +0,0 @@ -package test - -import ( - "fmt" - "os" - "os/exec" - "strings" - "testing" -) - -// Go get external example plugin, compile it into CoreDNS -// and check if it is really there, but running coredns -plugins. - -// Dangerous test as it messes with your git tree, maybe use tag? -func testExternalPluginCompile(t *testing.T) { - if err := addExamplePlugin(); err != nil { - t.Fatal(err) - } - defer run(t, gitReset) - - if _, err := run(t, goGet); err != nil { - t.Fatal(err) - } - - if _, err := run(t, goGen); err != nil { - t.Fatal(err) - } - - if _, err := run(t, goBuild); err != nil { - t.Fatal(err) - } - - out, err := run(t, coredns) - if err != nil { - t.Fatal(err) - } - - if !strings.Contains(string(out), "dns.example") { - t.Fatal("Plugin dns.example should be there") - } -} - -func run(t *testing.T, c *exec.Cmd) ([]byte, error) { - c.Dir = ".." 
- out, err := c.Output() - if err != nil { - return nil, fmt.Errorf("run: failed to run %s %s: %q", c.Args[0], c.Args[1], err) - } - return out, nil - -} - -func addExamplePlugin() error { - f, err := os.OpenFile("../plugin.cfg", os.O_APPEND|os.O_WRONLY, os.ModeAppend) - if err != nil { - return err - } - defer f.Close() - - _, err = f.WriteString(example) - return err -} - -var ( - goBuild = exec.Command("go", "build") - goGen = exec.Command("go", "generate") - goGet = exec.Command("go", "get", "github.com/coredns/example") - gitReset = exec.Command("git", "checkout", "core/*") - coredns = exec.Command("./coredns", "-plugins") -) - -const example = "1001:example:github.com/coredns/example" diff --git a/test/plugin_test.go b/test/plugin_test.go deleted file mode 100644 index 4003a958fe3..00000000000 --- a/test/plugin_test.go +++ /dev/null @@ -1,43 +0,0 @@ -package test - -import ( - "testing" - - "github.com/coredns/coredns/plugin/test" - - "github.com/miekg/dns" - - // Load all managed plugins in github.com/coredns/coredns - _ "github.com/coredns/coredns/core/plugin" -) - -func benchmarkLookupBalanceRewriteCache(b *testing.B) { - t := new(testing.T) - name, rm, err := test.TempFile(".", exampleOrg) - if err != nil { - t.Fatalf("Failed to create zone: %s", err) - } - defer rm() - - corefile := `example.org:0 { - file ` + name + ` - rewrite type ANY HINFO - loadbalance -} -` - - ex, udp, _, err := CoreDNSServerAndPorts(corefile) - if err != nil { - t.Fatalf("Could not get CoreDNS serving instance: %s", err) - } - defer ex.Stop() - - c := new(dns.Client) - m := new(dns.Msg) - m.SetQuestion("example.org.", dns.TypeA) - - b.ResetTimer() - for i := 0; i < b.N; i++ { - c.Exchange(m, udp) - } -} diff --git a/test/server.go b/test/server.go index 93016501dc0..9908285d135 100644 --- a/test/server.go +++ b/test/server.go @@ -4,9 +4,10 @@ import ( "sync" "github.com/coredns/coredns/core/dnsserver" - // Hook in CoreDNS. 
_ "github.com/coredns/coredns/core" + // Load all managed plugins in github.com/coredns/coredns + _ "github.com/coredns/coredns/core/plugin" "github.com/caddyserver/caddy" ) From 4ffbee299a751591ae98d7f07d3c721b819906c1 Mon Sep 17 00:00:00 2001 From: Erfan Besharat Date: Tue, 1 Oct 2019 10:12:10 +0330 Subject: [PATCH 219/324] Remove deprecated function calls flagged by staticcheck (#3333) * Use session.NewSession instead of session.New Signed-off-by: Erfan Besharat * Use grpc.DialContext instead of grpc.WithTimeout Signed-off-by: Erfan Besharat * Pass non-nil context to context.WithTimeout Signed-off-by: Erfan Besharat * Return the error directly in route53 setup Co-Authored-By: Miek Gieben Signed-off-by: Erfan Besharat --- plugin/route53/setup.go | 8 +++++++- test/grpc_test.go | 3 ++- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/plugin/route53/setup.go b/plugin/route53/setup.go index 31169edafc8..3b47fad5931 100644 --- a/plugin/route53/setup.go +++ b/plugin/route53/setup.go @@ -120,8 +120,14 @@ func setup(c *caddy.Controller, f func(*credentials.Credentials) route53iface.Ro return plugin.Error("route53", c.Errf("unknown property '%s'", c.Val())) } } + + session, err := session.NewSession(&aws.Config{}) + if err != nil { + return plugin.Error("route53", err) + } + providers = append(providers, &credentials.EnvProvider{}, sharedProvider, &ec2rolecreds.EC2RoleProvider{ - Client: ec2metadata.New(session.New(&aws.Config{})), + Client: ec2metadata.New(session), }) client := f(credentials.NewChainCredentials(providers)) ctx := context.Background() diff --git a/test/grpc_test.go b/test/grpc_test.go index fe273e78900..a0d48490da4 100644 --- a/test/grpc_test.go +++ b/test/grpc_test.go @@ -22,7 +22,8 @@ func TestGrpc(t *testing.T) { } defer g.Stop() - conn, err := grpc.Dial(tcp, grpc.WithInsecure(), grpc.WithBlock(), grpc.WithTimeout(5*time.Second)) + ctx, _ := context.WithTimeout(context.Background(), 5*time.Second) + conn, 
err := grpc.DialContext(ctx, tcp, grpc.WithInsecure(), grpc.WithBlock()) if err != nil { t.Fatalf("Expected no error but got: %s", err) } From 7b69dfebb595fe08bc1fcf52234880da79d4e1aa Mon Sep 17 00:00:00 2001 From: Uladzimir Trehubenka Date: Tue, 1 Oct 2019 15:22:42 +0300 Subject: [PATCH 220/324] plugin/file: fix panic in miekg/dns.CompareDomainName() (#3337) Signed-off-by: utrehubenka --- plugin/file/lookup.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugin/file/lookup.go b/plugin/file/lookup.go index 15ea096bbe5..3d8d899dfe6 100644 --- a/plugin/file/lookup.go +++ b/plugin/file/lookup.go @@ -372,7 +372,7 @@ func (z *Zone) additionalProcessing(answer []dns.RR, do bool) (extra []dns.RR) { case *dns.MX: name = x.Mx } - if !dns.IsSubDomain(z.origin, name) { + if len(name) == 0 || !dns.IsSubDomain(z.origin, name) { continue } From 2d98d520b5c75d31e37dfe72a67b39707a56dc69 Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Tue, 1 Oct 2019 16:39:42 +0100 Subject: [PATCH 221/324] plugin/forward: make Yield not block (#3336) * plugin/forward: may Yield not block Yield may block when we're super busy with creating (and looking) for connection. Set a small timeout on Yield, to skip putting the connection back in the queue. Use persistentConn troughout the socket handling code to be more consistent. 
Signed-off-by: Miek Gieben Dont do Signed-off-by: Miek Gieben * Set used in Yield This gives one central place where we update used in the persistConns Signed-off-by: Miek Gieben --- plugin/forward/README.md | 1 - plugin/forward/connect.go | 34 ++++++++++---------- plugin/forward/health_test.go | 10 +++--- plugin/forward/persistent.go | 52 +++++++++++++++---------------- plugin/forward/persistent_test.go | 52 ++----------------------------- plugin/forward/proxy.go | 2 +- plugin/forward/proxy_test.go | 2 +- plugin/forward/setup.go | 5 +-- 8 files changed, 53 insertions(+), 105 deletions(-) diff --git a/plugin/forward/README.md b/plugin/forward/README.md index ae7ce67ba83..fd00253ddcb 100644 --- a/plugin/forward/README.md +++ b/plugin/forward/README.md @@ -102,7 +102,6 @@ If monitoring is enabled (via the *prometheus* directive) then the following met * `coredns_forward_healthcheck_failure_count_total{to}` - number of failed health checks per upstream. * `coredns_forward_healthcheck_broken_count_total{}` - counter of when all upstreams are unhealthy, and we are randomly (this always uses the `random` policy) spraying to an upstream. -* `coredns_forward_socket_count_total{to}` - number of cached sockets per upstream. Where `to` is one of the upstream servers (**TO** from the config), `rcode` is the returned RCODE from the upstream. diff --git a/plugin/forward/connect.go b/plugin/forward/connect.go index 8fde2224bf5..9ac1afe1641 100644 --- a/plugin/forward/connect.go +++ b/plugin/forward/connect.go @@ -44,17 +44,17 @@ func (t *Transport) updateDialTimeout(newDialTime time.Duration) { } // Dial dials the address configured in transport, potentially reusing a connection or creating a new one. -func (t *Transport) Dial(proto string) (*dns.Conn, bool, error) { +func (t *Transport) Dial(proto string) (*persistConn, bool, error) { // If tls has been configured; use it. 
if t.tlsConfig != nil { proto = "tcp-tls" } t.dial <- proto - c := <-t.ret + pc := <-t.ret - if c != nil { - return c, true, nil + if pc != nil { + return pc, true, nil } reqTime := time.Now() @@ -62,11 +62,11 @@ func (t *Transport) Dial(proto string) (*dns.Conn, bool, error) { if proto == "tcp-tls" { conn, err := dns.DialTimeoutWithTLS("tcp", t.addr, t.tlsConfig, timeout) t.updateDialTimeout(time.Since(reqTime)) - return conn, false, err + return &persistConn{c: conn}, false, err } conn, err := dns.DialTimeout(proto, t.addr, timeout) t.updateDialTimeout(time.Since(reqTime)) - return conn, false, err + return &persistConn{c: conn}, false, err } // Connect selects an upstream, sends the request and waits for a response. @@ -83,20 +83,20 @@ func (p *Proxy) Connect(ctx context.Context, state request.Request, opts options proto = state.Proto() } - conn, cached, err := p.transport.Dial(proto) + pc, cached, err := p.transport.Dial(proto) if err != nil { return nil, err } // Set buffer size correctly for this client. 
- conn.UDPSize = uint16(state.Size()) - if conn.UDPSize < 512 { - conn.UDPSize = 512 + pc.c.UDPSize = uint16(state.Size()) + if pc.c.UDPSize < 512 { + pc.c.UDPSize = 512 } - conn.SetWriteDeadline(time.Now().Add(maxTimeout)) - if err := conn.WriteMsg(state.Req); err != nil { - conn.Close() // not giving it back + pc.c.SetWriteDeadline(time.Now().Add(maxTimeout)) + if err := pc.c.WriteMsg(state.Req); err != nil { + pc.c.Close() // not giving it back if err == io.EOF && cached { return nil, ErrCachedClosed } @@ -104,11 +104,11 @@ func (p *Proxy) Connect(ctx context.Context, state request.Request, opts options } var ret *dns.Msg - conn.SetReadDeadline(time.Now().Add(readTimeout)) + pc.c.SetReadDeadline(time.Now().Add(readTimeout)) for { - ret, err = conn.ReadMsg() + ret, err = pc.c.ReadMsg() if err != nil { - conn.Close() // not giving it back + pc.c.Close() // not giving it back if err == io.EOF && cached { return nil, ErrCachedClosed } @@ -120,7 +120,7 @@ func (p *Proxy) Connect(ctx context.Context, state request.Request, opts options } } - p.transport.Yield(conn) + p.transport.Yield(pc) rc, ok := dns.RcodeToString[ret.Rcode] if !ok { diff --git a/plugin/forward/health_test.go b/plugin/forward/health_test.go index 3d06f283559..b1785eeaedc 100644 --- a/plugin/forward/health_test.go +++ b/plugin/forward/health_test.go @@ -29,7 +29,7 @@ func TestHealth(t *testing.T) { p := NewProxy(s.Addr, transport.DNS) f := New() f.SetProxy(p) - defer f.Close() + defer f.OnShutdown() req := new(dns.Msg) req.SetQuestion("example.org.", dns.TypeA) @@ -69,7 +69,7 @@ func TestHealthTimeout(t *testing.T) { p := NewProxy(s.Addr, transport.DNS) f := New() f.SetProxy(p) - defer f.Close() + defer f.OnShutdown() req := new(dns.Msg) req.SetQuestion("example.org.", dns.TypeA) @@ -113,7 +113,7 @@ func TestHealthFailTwice(t *testing.T) { p := NewProxy(s.Addr, transport.DNS) f := New() f.SetProxy(p) - defer f.Close() + defer f.OnShutdown() req := new(dns.Msg) req.SetQuestion("example.org.", 
dns.TypeA) @@ -137,7 +137,7 @@ func TestHealthMaxFails(t *testing.T) { f := New() f.maxfails = 2 f.SetProxy(p) - defer f.Close() + defer f.OnShutdown() req := new(dns.Msg) req.SetQuestion("example.org.", dns.TypeA) @@ -169,7 +169,7 @@ func TestHealthNoMaxFails(t *testing.T) { f := New() f.maxfails = 0 f.SetProxy(p) - defer f.Close() + defer f.OnShutdown() req := new(dns.Msg) req.SetQuestion("example.org.", dns.TypeA) diff --git a/plugin/forward/persistent.go b/plugin/forward/persistent.go index d1348f94d4c..17cc5680a25 100644 --- a/plugin/forward/persistent.go +++ b/plugin/forward/persistent.go @@ -24,8 +24,8 @@ type Transport struct { tlsConfig *tls.Config dial chan string - yield chan *dns.Conn - ret chan *dns.Conn + yield chan *persistConn + ret chan *persistConn stop chan bool } @@ -36,23 +36,13 @@ func newTransport(addr string) *Transport { expire: defaultExpire, addr: addr, dial: make(chan string), - yield: make(chan *dns.Conn), - ret: make(chan *dns.Conn), + yield: make(chan *persistConn), + ret: make(chan *persistConn), stop: make(chan bool), } return t } -// len returns the number of connection, used for metrics. Can only be safely -// used inside connManager() because of data races. -func (t *Transport) len() int { - l := 0 - for _, conns := range t.conns { - l += len(conns) - } - return l -} - // connManagers manages the persistent connection cache for UDP and TCP. func (t *Transport) connManager() { ticker := time.NewTicker(t.expire) @@ -66,7 +56,7 @@ Wait: if time.Since(pc.used) < t.expire { // Found one, remove from pool and return this conn. t.conns[proto] = stack[:len(stack)-1] - t.ret <- pc.c + t.ret <- pc continue Wait } // clear entire cache if the last conn is expired @@ -75,26 +65,21 @@ Wait: // transport methods anymore. 
So, it's safe to close them in a separate goroutine go closeConns(stack) } - SocketGauge.WithLabelValues(t.addr).Set(float64(t.len())) - t.ret <- nil - case conn := <-t.yield: - - SocketGauge.WithLabelValues(t.addr).Set(float64(t.len() + 1)) - + case pc := <-t.yield: // no proto here, infer from config and conn - if _, ok := conn.Conn.(*net.UDPConn); ok { - t.conns["udp"] = append(t.conns["udp"], &persistConn{conn, time.Now()}) + if _, ok := pc.c.Conn.(*net.UDPConn); ok { + t.conns["udp"] = append(t.conns["udp"], pc) continue Wait } if t.tlsConfig == nil { - t.conns["tcp"] = append(t.conns["tcp"], &persistConn{conn, time.Now()}) + t.conns["tcp"] = append(t.conns["tcp"], pc) continue Wait } - t.conns["tcp-tls"] = append(t.conns["tcp-tls"], &persistConn{conn, time.Now()}) + t.conns["tcp-tls"] = append(t.conns["tcp-tls"], pc) case <-ticker.C: t.cleanup(false) @@ -143,8 +128,23 @@ func (t *Transport) cleanup(all bool) { } } +// It is hard to pin a value to this, the import thing is to no block forever, loosing at cached connection is not terrible. +const yieldTimeout = 25 * time.Millisecond + // Yield return the connection to transport for reuse. -func (t *Transport) Yield(c *dns.Conn) { t.yield <- c } +func (t *Transport) Yield(pc *persistConn) { + pc.used = time.Now() // update used time + + // Make ths non-blocking, because in the case of a very busy forwarder we will *block* on this yield. This + // blocks the outer go-routine and stuff will just pile up. We timeout when the send fails to as returning + // these connection is an optimization anyway. + select { + case t.yield <- pc: + return + case <-time.After(yieldTimeout): + return + } +} // Start starts the transport's connection manager. 
func (t *Transport) Start() { go t.connManager() } diff --git a/plugin/forward/persistent_test.go b/plugin/forward/persistent_test.go index f1c906076b4..1fb239ca77d 100644 --- a/plugin/forward/persistent_test.go +++ b/plugin/forward/persistent_test.go @@ -82,54 +82,6 @@ func TestCleanupByTimer(t *testing.T) { tr.Yield(c4) } -func TestPartialCleanup(t *testing.T) { - s := dnstest.NewServer(func(w dns.ResponseWriter, r *dns.Msg) { - ret := new(dns.Msg) - ret.SetReply(r) - w.WriteMsg(ret) - }) - defer s.Close() - - tr := newTransport(s.Addr) - tr.SetExpire(100 * time.Millisecond) - tr.Start() - defer tr.Stop() - - c1, _, _ := tr.Dial("udp") - c2, _, _ := tr.Dial("udp") - c3, _, _ := tr.Dial("udp") - c4, _, _ := tr.Dial("udp") - c5, _, _ := tr.Dial("udp") - - tr.Yield(c1) - time.Sleep(10 * time.Millisecond) - tr.Yield(c2) - time.Sleep(10 * time.Millisecond) - tr.Yield(c3) - time.Sleep(50 * time.Millisecond) - tr.Yield(c4) - time.Sleep(10 * time.Millisecond) - tr.Yield(c5) - time.Sleep(40 * time.Millisecond) - - c6, _, _ := tr.Dial("udp") - if c6 != c5 { - t.Errorf("Expected c6 == c5") - } - c7, _, _ := tr.Dial("udp") - if c7 != c4 { - t.Errorf("Expected c7 == c4") - } - c8, cached, _ := tr.Dial("udp") - if cached { - t.Error("Expected non-cached connection (c8)") - } - - tr.Yield(c6) - tr.Yield(c7) - tr.Yield(c8) -} - func TestCleanupAll(t *testing.T) { s := dnstest.NewServer(func(w dns.ResponseWriter, r *dns.Msg) { ret := new(dns.Msg) @@ -150,12 +102,12 @@ func TestCleanupAll(t *testing.T) { {c3, time.Now()}, } - if tr.len() != 3 { + if len(tr.conns["udp"]) != 3 { t.Error("Expected 3 connections") } tr.cleanup(true) - if tr.len() > 0 { + if len(tr.conns["udp"]) > 0 { t.Error("Expected no cached connections") } } diff --git a/plugin/forward/proxy.go b/plugin/forward/proxy.go index 6485d8d726e..60dfa47a610 100644 --- a/plugin/forward/proxy.go +++ b/plugin/forward/proxy.go @@ -66,7 +66,7 @@ func (p *Proxy) Down(maxfails uint32) bool { } // close stops the health checking 
goroutine. -func (p *Proxy) close() { p.probe.Stop() } +func (p *Proxy) stop() { p.probe.Stop() } func (p *Proxy) finalizer() { p.transport.Stop() } // start starts the proxy's healthchecking. diff --git a/plugin/forward/proxy_test.go b/plugin/forward/proxy_test.go index 7075e1133ad..b962f561b4e 100644 --- a/plugin/forward/proxy_test.go +++ b/plugin/forward/proxy_test.go @@ -35,7 +35,7 @@ func TestProxyClose(t *testing.T) { go func() { p.Connect(ctx, state, options{}) }() go func() { p.Connect(ctx, state, options{forceTCP: true}) }() - p.close() + p.stop() } } diff --git a/plugin/forward/setup.go b/plugin/forward/setup.go index 3bfee9f01ec..d457566931a 100644 --- a/plugin/forward/setup.go +++ b/plugin/forward/setup.go @@ -54,14 +54,11 @@ func (f *Forward) OnStartup() (err error) { // OnShutdown stops all configured proxies. func (f *Forward) OnShutdown() error { for _, p := range f.proxies { - p.close() + p.stop() } return nil } -// Close is a synonym for OnShutdown(). -func (f *Forward) Close() { f.OnShutdown() } - func parseForward(c *caddy.Controller) (*Forward, error) { var ( f *Forward From 575cea4496fd8556ec198ea43c99a5c6cfc603b2 Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Tue, 1 Oct 2019 20:45:52 +0100 Subject: [PATCH 222/324] Move map to array (#3339) * Move map to array The map was not needed move to an array, see #1941 for the original idea. 
That of course didn't apply anymore; make a super minimal change to implements the idea from #1941 Signed-off-by: Miek Gieben * Add total count Signed-off-by: Miek Gieben --- plugin/forward/persistent.go | 36 +++++++++++------------------- plugin/forward/persistent_test.go | 10 +++------ plugin/forward/type.go | 37 +++++++++++++++++++++++++++++++ 3 files changed, 53 insertions(+), 30 deletions(-) create mode 100644 plugin/forward/type.go diff --git a/plugin/forward/persistent.go b/plugin/forward/persistent.go index 17cc5680a25..7dc01326bc7 100644 --- a/plugin/forward/persistent.go +++ b/plugin/forward/persistent.go @@ -2,7 +2,6 @@ package forward import ( "crypto/tls" - "net" "sort" "time" @@ -17,9 +16,9 @@ type persistConn struct { // Transport hold the persistent cache. type Transport struct { - avgDialTime int64 // kind of average time of dial time - conns map[string][]*persistConn // Buckets for udp, tcp and tcp-tls. - expire time.Duration // After this duration a connection is expired. + avgDialTime int64 // kind of average time of dial time + conns [typeTotalCount][]*persistConn // Buckets for udp, tcp and tcp-tls. + expire time.Duration // After this duration a connection is expired. addr string tlsConfig *tls.Config @@ -32,7 +31,7 @@ type Transport struct { func newTransport(addr string) *Transport { t := &Transport{ avgDialTime: int64(maxDialTimeout / 2), - conns: make(map[string][]*persistConn), + conns: [typeTotalCount][]*persistConn{}, expire: defaultExpire, addr: addr, dial: make(chan string), @@ -50,17 +49,18 @@ Wait: for { select { case proto := <-t.dial: + transtype := stringToTransportType(proto) // take the last used conn - complexity O(1) - if stack := t.conns[proto]; len(stack) > 0 { + if stack := t.conns[transtype]; len(stack) > 0 { pc := stack[len(stack)-1] if time.Since(pc.used) < t.expire { // Found one, remove from pool and return this conn. 
- t.conns[proto] = stack[:len(stack)-1] + t.conns[transtype] = stack[:len(stack)-1] t.ret <- pc continue Wait } // clear entire cache if the last conn is expired - t.conns[proto] = nil + t.conns[transtype] = nil // now, the connections being passed to closeConns() are not reachable from // transport methods anymore. So, it's safe to close them in a separate goroutine go closeConns(stack) @@ -68,18 +68,8 @@ Wait: t.ret <- nil case pc := <-t.yield: - // no proto here, infer from config and conn - if _, ok := pc.c.Conn.(*net.UDPConn); ok { - t.conns["udp"] = append(t.conns["udp"], pc) - continue Wait - } - - if t.tlsConfig == nil { - t.conns["tcp"] = append(t.conns["tcp"], pc) - continue Wait - } - - t.conns["tcp-tls"] = append(t.conns["tcp-tls"], pc) + transtype := t.transportTypeFromConn(pc) + t.conns[transtype] = append(t.conns[transtype], pc) case <-ticker.C: t.cleanup(false) @@ -102,12 +92,12 @@ func closeConns(conns []*persistConn) { // cleanup removes connections from cache. func (t *Transport) cleanup(all bool) { staleTime := time.Now().Add(-t.expire) - for proto, stack := range t.conns { + for transtype, stack := range t.conns { if len(stack) == 0 { continue } if all { - t.conns[proto] = nil + t.conns[transtype] = nil // now, the connections being passed to closeConns() are not reachable from // transport methods anymore. So, it's safe to close them in a separate goroutine go closeConns(stack) @@ -121,7 +111,7 @@ func (t *Transport) cleanup(all bool) { good := sort.Search(len(stack), func(i int) bool { return stack[i].used.After(staleTime) }) - t.conns[proto] = stack[good:] + t.conns[transtype] = stack[good:] // now, the connections being passed to closeConns() are not reachable from // transport methods anymore. 
So, it's safe to close them in a separate goroutine go closeConns(stack[:good]) diff --git a/plugin/forward/persistent_test.go b/plugin/forward/persistent_test.go index 1fb239ca77d..9ea0cdc109d 100644 --- a/plugin/forward/persistent_test.go +++ b/plugin/forward/persistent_test.go @@ -96,18 +96,14 @@ func TestCleanupAll(t *testing.T) { c2, _ := dns.DialTimeout("udp", tr.addr, maxDialTimeout) c3, _ := dns.DialTimeout("udp", tr.addr, maxDialTimeout) - tr.conns["udp"] = []*persistConn{ - {c1, time.Now()}, - {c2, time.Now()}, - {c3, time.Now()}, - } + tr.conns[typeUdp] = []*persistConn{{c1, time.Now()}, {c2, time.Now()}, {c3, time.Now()}} - if len(tr.conns["udp"]) != 3 { + if len(tr.conns[typeUdp]) != 3 { t.Error("Expected 3 connections") } tr.cleanup(true) - if len(tr.conns["udp"]) > 0 { + if len(tr.conns[typeUdp]) > 0 { t.Error("Expected no cached connections") } } diff --git a/plugin/forward/type.go b/plugin/forward/type.go new file mode 100644 index 00000000000..83ddbc7c5e6 --- /dev/null +++ b/plugin/forward/type.go @@ -0,0 +1,37 @@ +package forward + +import "net" + +type transportType int + +const ( + typeUdp transportType = iota + typeTcp + typeTls + typeTotalCount // keep this last +) + +func stringToTransportType(s string) transportType { + switch s { + case "udp": + return typeUdp + case "tcp": + return typeTcp + case "tcp-tls": + return typeTls + } + + return typeUdp +} + +func (t *Transport) transportTypeFromConn(pc *persistConn) transportType { + if _, ok := pc.c.Conn.(*net.UDPConn); ok { + return typeUdp + } + + if t.tlsConfig == nil { + return typeTcp + } + + return typeTls +} From fa6718d02606852897c6c91cacf0f80c4d1a0243 Mon Sep 17 00:00:00 2001 From: Ingo Gottwald Date: Tue, 1 Oct 2019 22:32:59 +0200 Subject: [PATCH 223/324] Fix grpc test vet warning (#3341) This fixes the vet warning: the cancel function returned by context.WithTimeout should be called, not discarded, to avoid a context leak. 
Signed-off-by: Ingo Gottwald --- test/grpc_test.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test/grpc_test.go b/test/grpc_test.go index a0d48490da4..bb49e6b9bac 100644 --- a/test/grpc_test.go +++ b/test/grpc_test.go @@ -22,7 +22,8 @@ func TestGrpc(t *testing.T) { } defer g.Stop() - ctx, _ := context.WithTimeout(context.Background(), 5*time.Second) + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() conn, err := grpc.DialContext(ctx, tcp, grpc.WithInsecure(), grpc.WithBlock()) if err != nil { t.Fatalf("Expected no error but got: %s", err) From 64f0345e630f05807b81cced12085b20c5c468cf Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Wed, 2 Oct 2019 07:22:26 +0100 Subject: [PATCH 224/324] plugin/erratic: doc and zone transfer (#3340) Fix the documentation, remove autopath entry and fix the transfer by copying some bits from the file plugin. Signed-off-by: Miek Gieben --- plugin/erratic/README.md | 17 +++++++---------- plugin/erratic/xfr.go | 10 ++++++++-- 2 files changed, 15 insertions(+), 12 deletions(-) diff --git a/plugin/erratic/README.md b/plugin/erratic/README.md index dcd74097cc0..eeffd97fc19 100644 --- a/plugin/erratic/README.md +++ b/plugin/erratic/README.md @@ -6,15 +6,12 @@ ## Description -*erratic* returns a static response to all queries, but the responses can be delayed, dropped or truncated. -The *erratic* plugin will respond to every A or AAAA query. For any other type it will return -a SERVFAIL response. The reply for A will return 192.0.2.53 (see [RFC -5737](https://tools.ietf.org/html/rfc5737), -for AAAA it returns 2001:DB8::53 (see [RFC 3849](https://tools.ietf.org/html/rfc3849)) and for an -AXFR request it will respond with a small zone transfer. - -*erratic* can also be used in conjunction with the *autopath* plugin. This is mostly to aid in -testing. +*erratic* returns a static response to all queries, but the responses can be delayed, +dropped or truncated. 
The *erratic* plugin will respond to every A or AAAA query. For +any other type it will return a SERVFAIL response (except AXFR). The reply for A will return +192.0.2.53 ([RFC 5737](https://tools.ietf.org/html/rfc5737)), for AAAA it returns 2001:DB8::53 ([RFC +3849](https://tools.ietf.org/html/rfc3849)). For an AXFR request it will respond with a small +zone transfer. ## Syntax @@ -47,7 +44,7 @@ example.org { } ~~~ -Or even shorter if the defaults suits you. Note this only drops queries, it does not delay them. +Or even shorter if the defaults suit you. Note this only drops queries, it does not delay them. ~~~ corefile example.org { diff --git a/plugin/erratic/xfr.go b/plugin/erratic/xfr.go index eaaaf01fb22..e1ec77ee99c 100644 --- a/plugin/erratic/xfr.go +++ b/plugin/erratic/xfr.go @@ -2,6 +2,7 @@ package erratic import ( "strings" + "sync" "github.com/coredns/coredns/plugin/test" "github.com/coredns/coredns/request" @@ -46,6 +47,11 @@ func xfr(state request.Request, truncate bool) { close(ch) }() - tr.Out(state.W, state.Req, ch) - state.W.Hijack() + wg := new(sync.WaitGroup) + wg.Add(1) + go func() { + tr.Out(state.W, state.Req, ch) + wg.Done() + }() + wg.Wait() } From d48d8516e3e79e0dbcac4f0427ea46a522a30cf1 Mon Sep 17 00:00:00 2001 From: Yong Tang Date: Tue, 1 Oct 2019 23:42:29 -0700 Subject: [PATCH 225/324] Add clouddns/OWNERS file (#3346) Signed-off-by: Yong Tang --- plugin/clouddns/OWNERS | 6 ++++++ 1 file changed, 6 insertions(+) create mode 100644 plugin/clouddns/OWNERS diff --git a/plugin/clouddns/OWNERS b/plugin/clouddns/OWNERS new file mode 100644 index 00000000000..716e8641ad1 --- /dev/null +++ b/plugin/clouddns/OWNERS @@ -0,0 +1,6 @@ +reviewers: + - miekg + - yongtang +approvers: + - miekg + - yongtang From 0da2c0c366a583b192004052bd9484712b07a636 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Hauke=20L=C3=B6ffler?= Date: Wed, 2 Oct 2019 13:23:30 +0200 Subject: [PATCH 226/324] Use whitelist in .dockerignore (#3347) MIME-Version: 1.0 
Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Hauke Löffler --- .dockerignore | 16 ++-------------- 1 file changed, 2 insertions(+), 14 deletions(-) diff --git a/.dockerignore b/.dockerignore index 7dfc5a97e40..1ff16c577e9 100644 --- a/.dockerignore +++ b/.dockerignore @@ -1,14 +1,2 @@ -.git/* -core/* -coremain/* -hooks/* -man/* -pb/* -plugin/* -request/* -test/* -vendor/* -build/* -release/* -go.sum -go.mod +* +!coredns From 8fde7407d94feec02027de69d1ffa278ae0221ad Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Wed, 2 Oct 2019 23:18:36 +0100 Subject: [PATCH 227/324] plugin/clouddns: remove initialization from init (#3349) * plugin/clouddns: remove initialization from init Init should just call the plugin.Register with a setup function. Fixes: #3343 Signed-off-by: Miek Gieben * Fix placement for var f Signed-off-by: Miek Gieben --- plugin/clouddns/setup.go | 34 +++++++++++++++------------------- plugin/clouddns/setup_test.go | 4 ++-- 2 files changed, 17 insertions(+), 21 deletions(-) diff --git a/plugin/clouddns/setup.go b/plugin/clouddns/setup.go index 35c7e9179e1..7de8efe8986 100644 --- a/plugin/clouddns/setup.go +++ b/plugin/clouddns/setup.go @@ -17,27 +17,23 @@ import ( var log = clog.NewWithPlugin("clouddns") -func init() { - plugin.Register("clouddns", - func(c *caddy.Controller) error { - f := func(ctx context.Context, opt option.ClientOption) (gcpDNS, error) { - var err error - var client *gcp.Service - if opt != nil { - client, err = gcp.NewService(ctx, opt) - } else { - // if credentials file is not provided in the Corefile - // authenticate the client using env variables - client, err = gcp.NewService(ctx) - } - return gcpClient{client}, err - } - return setup(c, f) - }, - ) +func init() { plugin.Register("clouddns", setup) } + +// exposed for testing +var f = func(ctx context.Context, opt option.ClientOption) (gcpDNS, error) { + var err error + var client *gcp.Service + if opt != nil { + client, err = 
gcp.NewService(ctx, opt) + } else { + // if credentials file is not provided in the Corefile + // authenticate the client using env variables + client, err = gcp.NewService(ctx) + } + return gcpClient{client}, err } -func setup(c *caddy.Controller, f func(ctx context.Context, opt option.ClientOption) (gcpDNS, error)) error { +func setup(c *caddy.Controller) error { for c.Next() { keyPairs := map[string]struct{}{} keys := map[string][]string{} diff --git a/plugin/clouddns/setup_test.go b/plugin/clouddns/setup_test.go index be9c51d92f4..6d2997298b4 100644 --- a/plugin/clouddns/setup_test.go +++ b/plugin/clouddns/setup_test.go @@ -9,7 +9,7 @@ import ( ) func TestSetupCloudDNS(t *testing.T) { - f := func(ctx context.Context, opt option.ClientOption) (gcpDNS, error) { + f = func(ctx context.Context, opt option.ClientOption) (gcpDNS, error) { return fakeGCPClient{}, nil } @@ -41,7 +41,7 @@ func TestSetupCloudDNS(t *testing.T) { for _, test := range tests { c := caddy.NewTestController("dns", test.body) - if err := setup(c, f); (err == nil) == test.expectedError { + if err := setup(c); (err == nil) == test.expectedError { t.Errorf("Unexpected errors: %v", err) } } From aa96d6b443ea58c8f7c80649f76b93e243fbb3f4 Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Thu, 3 Oct 2019 07:21:11 +0100 Subject: [PATCH 228/324] plugin/route53: remove amazon intialization from init (#3348) Don't perform this code in the init, this allocated 1 megabyte of memory even if you don't use the plugin. Looks to be only there for testing, adding a comment to reflect that. 
Fixes #3342 Signed-off-by: Miek Gieben --- plugin/route53/setup.go | 18 ++++++------------ plugin/route53/setup_test.go | 4 ++-- 2 files changed, 8 insertions(+), 14 deletions(-) diff --git a/plugin/route53/setup.go b/plugin/route53/setup.go index 3b47fad5931..c285bee2d76 100644 --- a/plugin/route53/setup.go +++ b/plugin/route53/setup.go @@ -24,20 +24,14 @@ import ( var log = clog.NewWithPlugin("route53") -func init() { - plugin.Register("route53", - func(c *caddy.Controller) error { - f := func(credential *credentials.Credentials) route53iface.Route53API { - return route53.New(session.Must(session.NewSession(&aws.Config{ - Credentials: credential, - }))) - } - return setup(c, f) - }, - ) +func init() { plugin.Register("route53", setup) } + +// exposed for testing +var f = func(credential *credentials.Credentials) route53iface.Route53API { + return route53.New(session.Must(session.NewSession(&aws.Config{Credentials: credential}))) } -func setup(c *caddy.Controller, f func(*credentials.Credentials) route53iface.Route53API) error { +func setup(c *caddy.Controller) error { for c.Next() { keyPairs := map[string]struct{}{} keys := map[string][]string{} diff --git a/plugin/route53/setup_test.go b/plugin/route53/setup_test.go index 998285e46f0..cb73d8fefef 100644 --- a/plugin/route53/setup_test.go +++ b/plugin/route53/setup_test.go @@ -9,7 +9,7 @@ import ( ) func TestSetupRoute53(t *testing.T) { - f := func(credential *credentials.Credentials) route53iface.Route53API { + f = func(credential *credentials.Credentials) route53iface.Route53API { return fakeRoute53{} } @@ -73,7 +73,7 @@ func TestSetupRoute53(t *testing.T) { for _, test := range tests { c := caddy.NewTestController("dns", test.body) - if err := setup(c, f); (err == nil) == test.expectedError { + if err := setup(c); (err == nil) == test.expectedError { t.Errorf("Unexpected errors: %v", err) } } From f8551df27284b93e9f95e064ec65a9df60f4a528 Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Thu, 3 Oct 2019 15:05:44 
+0100 Subject: [PATCH 229/324] cache: move goroutine closure to separate function to save memory (#3353) The goroutine closure was causing objects to be heap allocated. Moving it to a separate function fixes that. ```benchmark old ns/op new ns/op delta BenchmarkCacheResponse/NoPrefetch-12 773 713 -7.76% BenchmarkCacheResponse/Prefetch-12 878 837 -4.67% BenchmarkHash-12 9.17 9.18 +0.11% benchmark old allocs new allocs delta BenchmarkCacheResponse/NoPrefetch-12 9 8 -11.11% BenchmarkCacheResponse/Prefetch-12 9 8 -11.11% BenchmarkHash-12 0 0 +0.00% benchmark old bytes new bytes delta BenchmarkCacheResponse/NoPrefetch-12 471 327 -30.57% BenchmarkCacheResponse/Prefetch-12 471 327 -30.57% BenchmarkHash-12 0 0 +0.00% ``` Signed-off-by: Charlie Vieth Signed-off-by: Miek Gieben --- plugin/cache/handler.go | 45 +++++++++++++++++++++++------------------ 1 file changed, 25 insertions(+), 20 deletions(-) diff --git a/plugin/cache/handler.go b/plugin/cache/handler.go index 2d608e8d353..4dc29167ad7 100644 --- a/plugin/cache/handler.go +++ b/plugin/cache/handler.go @@ -29,28 +29,10 @@ func (c *Cache) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) i, found := c.get(now, state, server) if i != nil && found { resp := i.toMsg(r, now) - w.WriteMsg(resp) - if c.prefetch > 0 { - ttl := i.ttl(now) - i.Freq.Update(c.duration, now) - - threshold := int(math.Ceil(float64(c.percentage) / 100 * float64(i.origTTL))) - if i.Freq.Hits() >= c.prefetch && ttl <= threshold { - cw := newPrefetchResponseWriter(server, state, c) - go func(w dns.ResponseWriter) { - cachePrefetches.WithLabelValues(server).Inc() - plugin.NextOrFailure(c.Name(), c.Next, ctx, w, r) - - // When prefetching we loose the item i, and with it the frequency - // that we've gathered sofar. See we copy the frequencies info back - // into the new item that was stored in the cache. 
- if i1 := c.exists(state); i1 != nil { - i1.Freq.Reset(now, i.Freq.Hits()) - } - }(cw) - } + if c.shouldPrefetch(i, now) { + go c.doPrefetch(ctx, state, server, i, now) } return dns.RcodeSuccess, nil } @@ -59,6 +41,29 @@ func (c *Cache) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) return plugin.NextOrFailure(c.Name(), c.Next, ctx, crr, r) } +func (c *Cache) doPrefetch(ctx context.Context, state request.Request, server string, i *item, now time.Time) { + cw := newPrefetchResponseWriter(server, state, c) + + cachePrefetches.WithLabelValues(server).Inc() + plugin.NextOrFailure(c.Name(), c.Next, ctx, cw, state.Req) + + // When prefetching we loose the item i, and with it the frequency + // that we've gathered sofar. See we copy the frequencies info back + // into the new item that was stored in the cache. + if i1 := c.exists(state); i1 != nil { + i1.Freq.Reset(now, i.Freq.Hits()) + } +} + +func (c *Cache) shouldPrefetch(i *item, now time.Time) bool { + if c.prefetch <= 0 { + return false + } + i.Freq.Update(c.duration, now) + threshold := int(math.Ceil(float64(c.percentage) / 100 * float64(i.origTTL))) + return i.Freq.Hits() >= c.prefetch && i.ttl(now) <= threshold +} + // Name implements the Handler interface. func (c *Cache) Name() string { return "cache" } From 03ea2ae955823612da2e3b2ebf21da7ab8eea1bd Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Fri, 4 Oct 2019 09:44:58 +0100 Subject: [PATCH 230/324] Make request.Request smaller (#3351) * Make request.Request smaller This makes the request struct smaller and removes the pointer to the do boolean (tri-bool) as size == 0 will indicate if we have cached it. Family can be a int8 because it only carries 3 values, Size itself is just a uint16 under the covers. 
This is a more comprehensive fix than #3292 Closes #3292 Signed-off-by: Miek Gieben * cache: fix test this now needs a valid response writter Signed-off-by: Miek Gieben --- plugin/cache/cache_test.go | 2 +- plugin/pkg/edns/edns.go | 2 +- request/request.go | 43 ++++++++++++++++---------------------- request/request_test.go | 2 +- 4 files changed, 21 insertions(+), 28 deletions(-) diff --git a/plugin/cache/cache_test.go b/plugin/cache/cache_test.go index 4afaf73c4da..b3235337274 100644 --- a/plugin/cache/cache_test.go +++ b/plugin/cache/cache_test.go @@ -191,7 +191,7 @@ func TestCache(t *testing.T) { m := tc.in.Msg() m = cacheMsg(m, tc) - state := request.Request{W: nil, Req: m} + state := request.Request{W: &test.ResponseWriter{}, Req: m} mt, _ := response.Typify(m, utc) valid, k := key(state.Name(), m, mt, state.Do()) diff --git a/plugin/pkg/edns/edns.go b/plugin/pkg/edns/edns.go index 68fb038657c..31f57ea9b89 100644 --- a/plugin/pkg/edns/edns.go +++ b/plugin/pkg/edns/edns.go @@ -63,7 +63,7 @@ func Version(req *dns.Msg) (*dns.Msg, error) { } // Size returns a normalized size based on proto. -func Size(proto string, size int) int { +func Size(proto string, size uint16) uint16 { if proto == "tcp" { return dns.MaxMsgSize } diff --git a/request/request.go b/request/request.go index 6f1a1de0e5f..76bb6a7876e 100644 --- a/request/request.go +++ b/request/request.go @@ -18,15 +18,16 @@ type Request struct { // Optional lowercased zone of this query. Zone string - // Cache size after first call to Size or Do. - size int - do *bool // nil: nothing, otherwise *do value + // Cache size after first call to Size or Do. If size is zero nothing has been cached yet. + // Both Size and Do set these values (and cache them). + size uint16 // UDP buffer size, or 64K in case of TCP. + do bool // DNSSEC OK value // Caches + family int8 // transport's family. name string // lowercase qname. ip string // client's ip. port string // client's port. - family int // transport's family. 
localPort string // server's port. localIP string // server's ip. } @@ -127,7 +128,7 @@ func Proto(w dns.ResponseWriter) string { // Family returns the family of the transport, 1 for IPv4 and 2 for IPv6. func (r *Request) Family() int { if r.family != 0 { - return r.family + return int(r.family) } var a net.IP @@ -141,26 +142,20 @@ func (r *Request) Family() int { if a.To4() != nil { r.family = 1 - return r.family + return 1 } r.family = 2 - return r.family + return 2 } // Do returns if the request has the DO (DNSSEC OK) bit set. func (r *Request) Do() bool { - if r.do != nil { - return *r.do + if r.size != 0 { + return r.do } - r.do = new(bool) - - if o := r.Req.IsEdns0(); o != nil { - *r.do = o.Do() - return *r.do - } - *r.do = false - return false + r.Size() + return r.do } // Len returns the length in bytes in the request. @@ -170,21 +165,19 @@ func (r *Request) Len() int { return r.Req.Len() } // Or when the request was over TCP, we return the maximum allowed size of 64K. func (r *Request) Size() int { if r.size != 0 { - return r.size + return int(r.size) } - size := 0 + size := uint16(0) if o := r.Req.IsEdns0(); o != nil { - if r.do == nil { - r.do = new(bool) - } - *r.do = o.Do() - size = int(o.UDPSize()) + r.do = o.Do() + size = o.UDPSize() } + // normalize size size = edns.Size(r.Proto(), size) r.size = size - return size + return int(size) } // SizeAndDo adds an OPT record that the reflects the intent from request. diff --git a/request/request_test.go b/request/request_test.go index a62fc51bfc1..0a3b1f2d8dc 100644 --- a/request/request_test.go +++ b/request/request_test.go @@ -13,7 +13,7 @@ func TestRequestDo(t *testing.T) { st := testRequest() st.Do() - if st.do == nil { + if !st.do { t.Errorf("Expected st.do to be set") } } From d7cdb992b4f96603d444b7ab058594563c5fe043 Mon Sep 17 00:00:00 2001 From: janluk Date: Fri, 4 Oct 2019 17:48:43 +0200 Subject: [PATCH 231/324] Measure and expose DNS programming latency from Kubernetes plugin. 
(#3171) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit For now metric is measure only for headless services. Informer has been slighlty refactored, so the code can measure latency without storing extra fields on Endpoint struct. Signed-off-by: Janek Łukaszewicz Suggestions from code review Co-Authored-By: Chris O'Haver --- .gitignore | 2 + plugin/kubernetes/README.md | 16 ++++ plugin/kubernetes/controller.go | 72 +++++++++++---- plugin/kubernetes/metrics.go | 76 +++++++++++++++ plugin/kubernetes/metrics_test.go | 132 +++++++++++++++++++++++++++ plugin/kubernetes/object/endpoint.go | 8 +- plugin/kubernetes/object/informer.go | 21 +++-- plugin/kubernetes/object/object.go | 6 +- plugin/kubernetes/object/pod.go | 14 ++- plugin/kubernetes/object/service.go | 14 ++- plugin/kubernetes/setup.go | 6 ++ 11 files changed, 330 insertions(+), 37 deletions(-) create mode 100644 plugin/kubernetes/metrics.go create mode 100644 plugin/kubernetes/metrics_test.go diff --git a/.gitignore b/.gitignore index 89971ccd0cb..ec03a44a4aa 100644 --- a/.gitignore +++ b/.gitignore @@ -13,3 +13,5 @@ coverage.txt .classpath .project .settings/** +build/ +release/ diff --git a/plugin/kubernetes/README.md b/plugin/kubernetes/README.md index c844bc0c2de..ccbd681d9c3 100644 --- a/plugin/kubernetes/README.md +++ b/plugin/kubernetes/README.md @@ -229,3 +229,19 @@ plugin is also enabled: * kubernetes/service: the service name in the query * kubernetes/client-namespace: the client pod's namespace, if `pods verified` mode is enabled * kubernetes/client-pod-name: the client pod's name, if `pods verified` mode is enabled + +## Metrics + +The *kubernetes* plugin exports the following *Prometheus* metrics. +* `coredns_kubernetes_dns_programming_latency_seconds{service_kind}` - exports the +[DNS programming latency SLI](https://github.com/kubernetes/community/blob/master/sig-scalability/slos/dns_programming_latency.md). 
+ The metrics has the `service_kind` label that identifies the kind of the + [kubernetes service](https://kubernetes.io/docs/concepts/services-networking/service). + It may take one of the three values: + * `cluster_ip` + * `headless_with_selector` + * `headless_without_selector` + +## Bugs + + * add support for other service types; only "headless_with_selector" is supported now diff --git a/plugin/kubernetes/controller.go b/plugin/kubernetes/controller.go index f5a3e7ffdcc..98f82341be7 100644 --- a/plugin/kubernetes/controller.go +++ b/plugin/kubernetes/controller.go @@ -88,8 +88,9 @@ type dnsControlOpts struct { namespaceLabelSelector *meta.LabelSelector namespaceSelector labels.Selector - zones []string - endpointNameMode bool + zones []string + endpointNameMode bool + skipAPIObjectsCleanup bool } // newDNSController creates a controller for CoreDNS. @@ -111,7 +112,7 @@ func newdnsController(kubeClient kubernetes.Interface, opts dnsControlOpts) *dns &api.Service{}, cache.ResourceEventHandlerFuncs{AddFunc: dns.Add, UpdateFunc: dns.Update, DeleteFunc: dns.Delete}, cache.Indexers{svcNameNamespaceIndex: svcNameNamespaceIndexFunc, svcIPIndex: svcIPIndexFunc}, - object.ToService, + object.DefaultProcessor(object.ToService(opts.skipAPIObjectsCleanup)), ) if opts.initPodCache { @@ -123,7 +124,7 @@ func newdnsController(kubeClient kubernetes.Interface, opts dnsControlOpts) *dns &api.Pod{}, cache.ResourceEventHandlerFuncs{AddFunc: dns.Add, UpdateFunc: dns.Update, DeleteFunc: dns.Delete}, cache.Indexers{podIPIndex: podIPIndexFunc}, - object.ToPod, + object.DefaultProcessor(object.ToPod(opts.skipAPIObjectsCleanup)), ) } @@ -134,9 +135,50 @@ func newdnsController(kubeClient kubernetes.Interface, opts dnsControlOpts) *dns WatchFunc: endpointsWatchFunc(dns.client, api.NamespaceAll, dns.selector), }, &api.Endpoints{}, - cache.ResourceEventHandlerFuncs{AddFunc: dns.Add, UpdateFunc: dns.Update, DeleteFunc: dns.Delete}, + cache.ResourceEventHandlerFuncs{}, 
cache.Indexers{epNameNamespaceIndex: epNameNamespaceIndexFunc, epIPIndex: epIPIndexFunc}, - object.ToEndpoints) + func(clientState cache.Indexer, h cache.ResourceEventHandler) cache.ProcessFunc { + return func(obj interface{}) error { + for _, d := range obj.(cache.Deltas) { + + apiEndpoints, obj := object.ToEndpoints(d.Object) + + switch d.Type { + case cache.Sync, cache.Added, cache.Updated: + if old, exists, err := clientState.Get(obj); err == nil && exists { + if err := clientState.Update(obj); err != nil { + return err + } + h.OnUpdate(old, obj) + // endpoint updates can come frequently, make sure it's a change we care about + if !endpointsEquivalent(old.(*object.Endpoints), obj) { + dns.updateModifed() + recordDNSProgrammingLatency(dns.getServices(obj), apiEndpoints) + } + } else { + if err := clientState.Add(obj); err != nil { + return err + } + h.OnAdd(d.Object) + dns.updateModifed() + recordDNSProgrammingLatency(dns.getServices(obj), apiEndpoints) + } + case cache.Deleted: + if err := clientState.Delete(obj); err != nil { + return err + } + h.OnDelete(d.Object) + dns.updateModifed() + recordDNSProgrammingLatency(dns.getServices(obj), apiEndpoints) + } + if !opts.skipAPIObjectsCleanup { + *apiEndpoints = api.Endpoints{} + } + } + return nil + } + }) + } dns.nsLister, dns.nsController = cache.NewInformer( @@ -423,17 +465,6 @@ func (dns *dnsControl) detectChanges(oldObj, newObj interface{}) { switch ob := obj.(type) { case *object.Service: dns.updateModifed() - case *object.Endpoints: - if newObj == nil || oldObj == nil { - dns.updateModifed() - return - } - p := oldObj.(*object.Endpoints) - // endpoint updates can come frequently, make sure it's a change we care about - if endpointsEquivalent(p, ob) { - return - } - dns.updateModifed() case *object.Pod: dns.updateModifed() default: @@ -441,6 +472,10 @@ func (dns *dnsControl) detectChanges(oldObj, newObj interface{}) { } } +func (dns *dnsControl) getServices(endpoints *object.Endpoints) []*object.Service { + 
return dns.SvcIndex(object.EndpointsKey(endpoints.GetName(), endpoints.GetNamespace())) +} + // subsetsEquivalent checks if two endpoint subsets are significantly equivalent // I.e. that they have the same ready addresses, host names, ports (including protocol // and service names for SRV) @@ -483,6 +518,9 @@ func subsetsEquivalent(sa, sb object.EndpointSubset) bool { // endpointsEquivalent checks if the update to an endpoint is something // that matters to us or if they are effectively equivalent. func endpointsEquivalent(a, b *object.Endpoints) bool { + if a == nil || b == nil { + return false + } if len(a.Subsets) != len(b.Subsets) { return false diff --git a/plugin/kubernetes/metrics.go b/plugin/kubernetes/metrics.go new file mode 100644 index 00000000000..e85b4812014 --- /dev/null +++ b/plugin/kubernetes/metrics.go @@ -0,0 +1,76 @@ +package kubernetes + +import ( + "time" + + "github.com/coredns/coredns/plugin" + "github.com/coredns/coredns/plugin/kubernetes/object" + "github.com/prometheus/client_golang/prometheus" + api "k8s.io/api/core/v1" +) + +const ( + subsystem = "kubernetes" +) + +var ( + // DnsProgrammingLatency is defined as the time it took to program a DNS instance - from the time + // a service or pod has changed to the time the change was propagated and was available to be + // served by a DNS server. + // The definition of this SLI can be found at https://github.com/kubernetes/community/blob/master/sig-scalability/slos/dns_programming_latency.md + // Note that the metrics is partially based on the time exported by the endpoints controller on + // the master machine. The measurement may be inaccurate if there is a clock drift between the + // node and master machine. 
+ // The service_kind label can be one of: + // * cluster_ip + // * headless_with_selector + // * headless_without_selector + DnsProgrammingLatency = prometheus.NewHistogramVec(prometheus.HistogramOpts{ + Namespace: plugin.Namespace, + Subsystem: subsystem, + Name: "dns_programming_latency_seconds", + // From 1 millisecond to ~17 minutes. + Buckets: prometheus.ExponentialBuckets(0.001, 2, 20), + Help: "Histogram of the time (in seconds) it took to program a dns instance.", + }, []string{"service_kind"}) + + // durationSinceFunc returns the duration elapsed since the given time. + // Added as a global variable to allow injection for testing. + durationSinceFunc = time.Since +) + +func recordDNSProgrammingLatency(svcs []*object.Service, endpoints *api.Endpoints) { + // getLastChangeTriggerTime is the time.Time value of the EndpointsLastChangeTriggerTime + // annotation stored in the given endpoints object or the "zero" time if the annotation wasn't set + var lastChangeTriggerTime time.Time + stringVal, ok := endpoints.Annotations[api.EndpointsLastChangeTriggerTime] + if ok { + ts, err := time.Parse(time.RFC3339Nano, stringVal) + if err != nil { + log.Warningf("DnsProgrammingLatency cannot be calculated for Endpoints '%s/%s'; invalid %q annotation RFC3339 value of %q", + endpoints.GetNamespace(), endpoints.GetName(), api.EndpointsLastChangeTriggerTime, stringVal) + // In case of error val = time.Zero, which is ignored in the upstream code. + } + lastChangeTriggerTime = ts + } + + // isHeadless indicates whether the endpoints object belongs to a headless + // service (i.e. clusterIp = None). Note that this can be a false negatives if the service + // informer is lagging, i.e. we may not see a recently created service. Given that the services + // don't change very often (comparing to much more frequent endpoints changes), cases when this method + // will return wrong answer should be relatively rare. 
Because of that we intentionally accept this + // flaw to keep the solution simple. + isHeadless := len(svcs) == 1 && svcs[0].ClusterIP == api.ClusterIPNone + + if endpoints == nil || !isHeadless || lastChangeTriggerTime.IsZero() { + return + } + + // If we're here it means that the Endpoints object is for a headless service and that + // the Endpoints object was created by the endpoints-controller (because the + // LastChangeTriggerTime annotation is set). It means that the corresponding service is a + // "headless service with selector". + DnsProgrammingLatency.WithLabelValues("headless_with_selector"). + Observe(durationSinceFunc(lastChangeTriggerTime).Seconds()) + +} diff --git a/plugin/kubernetes/metrics_test.go b/plugin/kubernetes/metrics_test.go new file mode 100644 index 00000000000..b9d3a9509a5 --- /dev/null +++ b/plugin/kubernetes/metrics_test.go @@ -0,0 +1,132 @@ +package kubernetes + +import ( + "strings" + "testing" + "time" + + "github.com/coredns/coredns/plugin/kubernetes/object" + + "github.com/prometheus/client_golang/prometheus/testutil" + api "k8s.io/api/core/v1" + meta "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/fake" +) + +const ( + namespace = "testns" +) + +func TestDnsProgrammingLatency(t *testing.T) { + client := fake.NewSimpleClientset() + now := time.Now() + controller := newdnsController(client, dnsControlOpts{ + initEndpointsCache: true, + // This is needed as otherwise the fake k8s client doesn't work properly. 
+ skipAPIObjectsCleanup: true, + }) + durationSinceFunc = func(t time.Time) time.Duration { + return now.Sub(t) + } + DnsProgrammingLatency.Reset() + go controller.Run() + + subset1 := []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{IP: "1.2.3.4", Hostname: "foo"}}, + }} + + subset2 := []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{IP: "1.2.3.5", Hostname: "foo"}}, + }} + + createService(t, client, controller, "my-service", api.ClusterIPNone) + createEndpoints(t, client, "my-service", now.Add(-2*time.Second), subset1) + updateEndpoints(t, client, "my-service", now.Add(-1*time.Second), subset2) + + createEndpoints(t, client, "endpoints-no-service", now.Add(-4*time.Second), nil) + + createService(t, client, controller, "clusterIP-service", "10.40.0.12") + createEndpoints(t, client, "clusterIP-service", now.Add(-8*time.Second), nil) + + createService(t, client, controller, "headless-no-annotation", api.ClusterIPNone) + createEndpoints(t, client, "headless-no-annotation", nil, nil) + + createService(t, client, controller, "headless-wrong-annotation", api.ClusterIPNone) + createEndpoints(t, client, "headless-wrong-annotation", "wrong-value", nil) + + controller.Stop() + expected := ` + # HELP coredns_kubernetes_dns_programming_latency_seconds Histogram of the time (in seconds) it took to program a dns instance. 
+ # TYPE coredns_kubernetes_dns_programming_latency_seconds histogram + coredns_kubernetes_dns_programming_latency_seconds_bucket{service_kind="headless_with_selector",le="0.001"} 0 + coredns_kubernetes_dns_programming_latency_seconds_bucket{service_kind="headless_with_selector",le="0.002"} 0 + coredns_kubernetes_dns_programming_latency_seconds_bucket{service_kind="headless_with_selector",le="0.004"} 0 + coredns_kubernetes_dns_programming_latency_seconds_bucket{service_kind="headless_with_selector",le="0.008"} 0 + coredns_kubernetes_dns_programming_latency_seconds_bucket{service_kind="headless_with_selector",le="0.016"} 0 + coredns_kubernetes_dns_programming_latency_seconds_bucket{service_kind="headless_with_selector",le="0.032"} 0 + coredns_kubernetes_dns_programming_latency_seconds_bucket{service_kind="headless_with_selector",le="0.064"} 0 + coredns_kubernetes_dns_programming_latency_seconds_bucket{service_kind="headless_with_selector",le="0.128"} 0 + coredns_kubernetes_dns_programming_latency_seconds_bucket{service_kind="headless_with_selector",le="0.256"} 0 + coredns_kubernetes_dns_programming_latency_seconds_bucket{service_kind="headless_with_selector",le="0.512"} 0 + coredns_kubernetes_dns_programming_latency_seconds_bucket{service_kind="headless_with_selector",le="1.024"} 1 + coredns_kubernetes_dns_programming_latency_seconds_bucket{service_kind="headless_with_selector",le="2.048"} 2 + coredns_kubernetes_dns_programming_latency_seconds_bucket{service_kind="headless_with_selector",le="4.096"} 2 + coredns_kubernetes_dns_programming_latency_seconds_bucket{service_kind="headless_with_selector",le="8.192"} 2 + coredns_kubernetes_dns_programming_latency_seconds_bucket{service_kind="headless_with_selector",le="16.384"} 2 + coredns_kubernetes_dns_programming_latency_seconds_bucket{service_kind="headless_with_selector",le="32.768"} 2 + coredns_kubernetes_dns_programming_latency_seconds_bucket{service_kind="headless_with_selector",le="65.536"} 2 + 
coredns_kubernetes_dns_programming_latency_seconds_bucket{service_kind="headless_with_selector",le="131.072"} 2 + coredns_kubernetes_dns_programming_latency_seconds_bucket{service_kind="headless_with_selector",le="262.144"} 2 + coredns_kubernetes_dns_programming_latency_seconds_bucket{service_kind="headless_with_selector",le="524.288"} 2 + coredns_kubernetes_dns_programming_latency_seconds_bucket{service_kind="headless_with_selector",le="+Inf"} 2 + coredns_kubernetes_dns_programming_latency_seconds_sum{service_kind="headless_with_selector"} 3 + coredns_kubernetes_dns_programming_latency_seconds_count{service_kind="headless_with_selector"} 2 + ` + if err := testutil.CollectAndCompare(DnsProgrammingLatency, strings.NewReader(expected)); err != nil { + t.Error(err) + } +} + +func buildEndpoints(name string, lastChangeTriggerTime interface{}, subsets []api.EndpointSubset) *api.Endpoints { + annotations := make(map[string]string) + switch v := lastChangeTriggerTime.(type) { + case string: + annotations[api.EndpointsLastChangeTriggerTime] = v + case time.Time: + annotations[api.EndpointsLastChangeTriggerTime] = v.Format(time.RFC3339Nano) + } + return &api.Endpoints{ + ObjectMeta: meta.ObjectMeta{Namespace: namespace, Name: name, Annotations: annotations}, + Subsets: subsets, + } +} + +func createEndpoints(t *testing.T, client kubernetes.Interface, name string, triggerTime interface{}, subsets []api.EndpointSubset) { + _, err := client.CoreV1().Endpoints(namespace).Create(buildEndpoints(name, triggerTime, subsets)) + if err != nil { + t.Fatal(err) + } +} + +func updateEndpoints(t *testing.T, client kubernetes.Interface, name string, triggerTime interface{}, subsets []api.EndpointSubset) { + _, err := client.CoreV1().Endpoints(namespace).Update(buildEndpoints(name, triggerTime, subsets)) + if err != nil { + t.Fatal(err) + } +} + +func createService(t *testing.T, client kubernetes.Interface, controller dnsController, name string, clusterIp string) { + if _, err := 
client.CoreV1().Services(namespace).Create(&api.Service{ + ObjectMeta: meta.ObjectMeta{Namespace: namespace, Name: name}, + Spec: api.ServiceSpec{ClusterIP: clusterIp}, + }); err != nil { + t.Fatal(err) + } + if err := wait.PollImmediate(10*time.Millisecond, 10*time.Second, func() (bool, error) { + return len(controller.SvcIndex(object.ServiceKey(name, namespace))) == 1, nil + }); err != nil { + t.Fatal(err) + } +} diff --git a/plugin/kubernetes/object/endpoint.go b/plugin/kubernetes/object/endpoint.go index b2c77fc106d..c7d6b7323c4 100644 --- a/plugin/kubernetes/object/endpoint.go +++ b/plugin/kubernetes/object/endpoint.go @@ -44,10 +44,10 @@ type EndpointPort struct { func EndpointsKey(name, namespace string) string { return name + "." + namespace } // ToEndpoints converts an api.Endpoints to a *Endpoints. -func ToEndpoints(obj interface{}) interface{} { +func ToEndpoints(obj interface{}) (*api.Endpoints, *Endpoints) { end, ok := obj.(*api.Endpoints) if !ok { - return nil + return nil, nil } e := &Endpoints{ @@ -93,9 +93,7 @@ func ToEndpoints(obj interface{}) interface{} { } } - *end = api.Endpoints{} - - return e + return end, e } // CopyWithoutSubsets copies e, without the subsets. diff --git a/plugin/kubernetes/object/informer.go b/plugin/kubernetes/object/informer.go index 919fce12d24..bd4d05d3055 100644 --- a/plugin/kubernetes/object/informer.go +++ b/plugin/kubernetes/object/informer.go @@ -5,19 +5,25 @@ import ( "k8s.io/client-go/tools/cache" ) -// NewIndexerInformer is a copy of the cache.NewIndexInformer function, but allows Process to have a conversion function (ToFunc). 
-func NewIndexerInformer(lw cache.ListerWatcher, objType runtime.Object, h cache.ResourceEventHandler, indexers cache.Indexers, convert ToFunc) (cache.Indexer, cache.Controller) { +// NewIndexerInformer is a copy of the cache.NewIndexerInformer function, but allows custom process function +func NewIndexerInformer(lw cache.ListerWatcher, objType runtime.Object, h cache.ResourceEventHandler, indexers cache.Indexers, builder ProcessorBuilder) (cache.Indexer, cache.Controller) { clientState := cache.NewIndexer(cache.DeletionHandlingMetaNamespaceKeyFunc, indexers) - fifo := cache.NewDeltaFIFO(cache.MetaNamespaceKeyFunc, clientState) - cfg := &cache.Config{ - Queue: fifo, + Queue: cache.NewDeltaFIFO(cache.MetaNamespaceKeyFunc, clientState), ListerWatcher: lw, ObjectType: objType, FullResyncPeriod: defaultResyncPeriod, RetryOnError: false, - Process: func(obj interface{}) error { + Process: builder(clientState, h), + } + return clientState, cache.New(cfg) +} + +// DefaultProcessor is a copy of Process function from cache.NewIndexerInformer except it does a conversion. +func DefaultProcessor(convert ToFunc) ProcessorBuilder { + return func(clientState cache.Indexer, h cache.ResourceEventHandler) cache.ProcessFunc { + return func(obj interface{}) error { for _, d := range obj.(cache.Deltas) { obj := convert(d.Object) @@ -43,9 +49,8 @@ func NewIndexerInformer(lw cache.ListerWatcher, objType runtime.Object, h cache. 
} } return nil - }, + } } - return clientState, cache.New(cfg) } const defaultResyncPeriod = 0 diff --git a/plugin/kubernetes/object/object.go b/plugin/kubernetes/object/object.go index 6b1c7d83903..132b5be6c6a 100644 --- a/plugin/kubernetes/object/object.go +++ b/plugin/kubernetes/object/object.go @@ -16,14 +16,18 @@ package object import ( - "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/cache" ) // ToFunc converts one empty interface to another. type ToFunc func(interface{}) interface{} +// ProcessorBuilder returns function to process cache events. +type ProcessorBuilder func(cache.Indexer, cache.ResourceEventHandler) cache.ProcessFunc + // Empty is an empty struct. type Empty struct{} diff --git a/plugin/kubernetes/object/pod.go b/plugin/kubernetes/object/pod.go index 072d8d56d87..9fc9b5726f9 100644 --- a/plugin/kubernetes/object/pod.go +++ b/plugin/kubernetes/object/pod.go @@ -16,8 +16,14 @@ type Pod struct { *Empty } -// ToPod converts an api.Pod to a *Pod. -func ToPod(obj interface{}) interface{} { +// ToPod returns a function that converts an api.Pod to a *Pod. +func ToPod(skipCleanup bool) ToFunc { + return func(obj interface{}) interface{} { + return toPod(skipCleanup, obj) + } +} + +func toPod(skipCleanup bool, obj interface{}) interface{} { pod, ok := obj.(*api.Pod) if !ok { return nil @@ -35,7 +41,9 @@ func ToPod(obj interface{}) interface{} { return nil } - *pod = api.Pod{} + if !skipCleanup { + *pod = api.Pod{} + } return p } diff --git a/plugin/kubernetes/object/service.go b/plugin/kubernetes/object/service.go index a41100ab927..295715e2dc0 100644 --- a/plugin/kubernetes/object/service.go +++ b/plugin/kubernetes/object/service.go @@ -26,8 +26,14 @@ type Service struct { // ServiceKey return a string using for the index. func ServiceKey(name, namespace string) string { return name + "." 
+ namespace } -// ToService converts an api.Service to a *Service. -func ToService(obj interface{}) interface{} { +// ToService returns a function that converts an api.Service to a *Service. +func ToService(skipCleanup bool) ToFunc { + return func(obj interface{}) interface{} { + return toService(skipCleanup, obj) + } +} + +func toService(skipCleanup bool, obj interface{}) interface{} { svc, ok := obj.(*api.Service) if !ok { return nil @@ -58,7 +64,9 @@ func ToService(obj interface{}) interface{} { s.ExternalIPs[li+i] = lb.IP } - *svc = api.Service{} + if !skipCleanup { + *svc = api.Service{} + } return s } diff --git a/plugin/kubernetes/setup.go b/plugin/kubernetes/setup.go index 63d85fee967..b35d653e11e 100644 --- a/plugin/kubernetes/setup.go +++ b/plugin/kubernetes/setup.go @@ -10,6 +10,7 @@ import ( "github.com/coredns/coredns/core/dnsserver" "github.com/coredns/coredns/plugin" + "github.com/coredns/coredns/plugin/metrics" "github.com/coredns/coredns/plugin/pkg/dnsutil" clog "github.com/coredns/coredns/plugin/pkg/log" "github.com/coredns/coredns/plugin/pkg/parse" @@ -51,6 +52,11 @@ func setup(c *caddy.Controller) error { k.RegisterKubeCache(c) + c.OnStartup(func() error { + metrics.MustRegister(c, DnsProgrammingLatency) + return nil + }) + dnsserver.GetConfig(c).AddPlugin(func(next plugin.Handler) plugin.Handler { k.Next = next return k From ea54bc0ad5f2d94c9083369fdbbcad58fc6c42e9 Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Sat, 5 Oct 2019 00:58:03 +0100 Subject: [PATCH 232/324] Upgrade to miekg/dns 1.1.22 (#3356) Signed-off-by: Miek Gieben --- go.mod | 9 +++++---- go.sum | 23 +++++++++++++++-------- 2 files changed, 20 insertions(+), 12 deletions(-) diff --git a/go.mod b/go.mod index 260153c6ebe..3a443aab365 100644 --- a/go.mod +++ b/go.mod @@ -28,7 +28,7 @@ require ( github.com/infobloxopen/go-trees v0.0.0-20190313150506-2af4e13f9062 github.com/konsorten/go-windows-terminal-sequences v1.0.2 
// indirect github.com/matttproud/golang_protobuf_extensions v1.0.1 - github.com/miekg/dns v1.1.17 + github.com/miekg/dns v1.1.22 github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492 // indirect github.com/opentracing/opentracing-go v1.1.0 github.com/openzipkin-contrib/zipkin-go-opentracing v0.3.5 @@ -40,8 +40,9 @@ require ( github.com/tinylib/msgp v1.1.0 // indirect github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 // indirect go.etcd.io/etcd v0.0.0-20190823073701-67d0c21bb04c - golang.org/x/crypto v0.0.0-20190829043050-9756ffdc2472 - golang.org/x/sys v0.0.0-20190904154756-749cb33beabd + golang.org/x/crypto v0.0.0-20191002192127-34f69633bfdc + golang.org/x/net v0.0.0-20191003171128-d98b1b443823 // indirect + golang.org/x/sys v0.0.0-20191003212358-c178f38b412c google.golang.org/api v0.10.0 google.golang.org/genproto v0.0.0-20190701230453-710ae3a149df // indirect google.golang.org/grpc v1.24.0 @@ -57,6 +58,6 @@ require ( replace ( github.com/Azure/go-autorest => github.com/Azure/go-autorest v13.0.0+incompatible - github.com/miekg/dns v1.1.3 => github.com/miekg/dns v1.1.17 + github.com/miekg/dns v1.1.3 => github.com/miekg/dns v1.1.22 golang.org/x/net v0.0.0-20190813000000-74dc4d7220e7 => golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297 ) diff --git a/go.sum b/go.sum index c70341d8f49..bebf560cc34 100644 --- a/go.sum +++ b/go.sum @@ -47,8 +47,6 @@ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRF github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/apache/thrift v0.12.0 h1:pODnxUFNcjP9UTLZGTdeh+j16A8lJbRvD3rOtrk/7bs= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= -github.com/aws/aws-sdk-go v1.24.3 
h1:113A33abx/cqv0ga94D8z9elza1YEm749ltJI67Uhq4= -github.com/aws/aws-sdk-go v1.24.3/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.25.1 h1:d7zDXFT2Tgq/yw7Wku49+lKisE8Xc85erb+8PlE/Shk= github.com/aws/aws-sdk-go v1.25.1/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= @@ -224,8 +222,8 @@ github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0j github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mholt/certmagic v0.6.2-0.20190624175158-6a42ef9fe8c2 h1:xKE9kZ5C8gelJC3+BNM6LJs1x21rivK7yxfTZMAuY2s= github.com/mholt/certmagic v0.6.2-0.20190624175158-6a42ef9fe8c2/go.mod h1:g4cOPxcjV0oFq3qwpjSA30LReKD8AoIfwAY9VvG35NY= -github.com/miekg/dns v1.1.17 h1:BhJxdA7bH51vKFZSY8Sn9pR7++LREvg0eYFzHA452ew= -github.com/miekg/dns v1.1.17/go.mod h1:WgzbA6oji13JREwiNsRDNfl7jYdPnmz+VEuLrA+/48M= +github.com/miekg/dns v1.1.22 h1:Jm64b3bO9kP43ddLjL2EY3Io6bmy1qGb9Xxz6TqS6rc= +github.com/miekg/dns v1.1.22/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -328,8 +326,10 @@ golang.org/x/crypto v0.0.0-20190228161510-8dd112bcdc25/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190418165655-df01cb2cc480/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod 
h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190829043050-9756ffdc2472 h1:Gv7RPwsi3eZ2Fgewe3CBsuOebPwO27PoXzRpJPsvSSM= -golang.org/x/crypto v0.0.0-20190829043050-9756ffdc2472/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392 h1:ACG4HJsFiNMf47Y4PeRoebLNy/2lXT9EtprMuTFWt1M= +golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= +golang.org/x/crypto v0.0.0-20191002192127-34f69633bfdc h1:c0o/qxkaO2LF5t6fQrT4b5hzyggAkLLlCUjqfRxd8Q4= +golang.org/x/crypto v0.0.0-20191002192127-34f69633bfdc/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= @@ -359,6 +359,10 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwL golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297 h1:k7pJ2yAPLPgbskkFdhRCsA77k2fySZ1zf2zCjvQCiIM= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190923162816-aa69164e4478 h1:l5EDrHhldLYb3ZRHDUhXF7Om7MvYXnkV9/iQNo1lX6g= +golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191003171128-d98b1b443823 h1:Ypyv6BNJh07T1pUSrehkLemqPKXhus2MkfktJ91kRh4= +golang.org/x/net v0.0.0-20191003171128-d98b1b443823/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 
v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -390,8 +394,11 @@ golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0 h1:HyfiK1WMnHj5FXFXatD+Qs1A/ golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3 h1:4y9KwBHBgBNwDbtu44R5o1fdOCQUEXhbk/P4A9WmJq0= golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190904154756-749cb33beabd h1:DBH9mDw0zluJT/R+nGuV3jWFWLFaHyYZWD4tOT+cjn0= -golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe h1:6fAMxZRR6sl1Uq8U61gxU+kPTs2tR8uOySCbBP7BN/M= +golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191003212358-c178f38b412c h1:6Zx7DRlKXf79yfxuQ/7GqV3w2y7aDsk6bGg0MzF5RVU= +golang.org/x/sys v0.0.0-20191003212358-c178f38b412c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= From 62d60023236b018419891dad1d6a7a1b37331ed7 Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Sat, 5 Oct 2019 08:51:34 +0100 Subject: [PATCH 233/324] upgrade etcd remove pin (#3359) This upgrade etcd to v3.4.1: go get go.etcd.io/etcd@a14579fbfb1a000439a40abf71862df51b0a2136 this is the commit latest on the release page. 
Signed-off-by: Miek Gieben --- go.mod | 3 +-- go.sum | 13 +++++++------ 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/go.mod b/go.mod index 3a443aab365..1d1c1a0eb88 100644 --- a/go.mod +++ b/go.mod @@ -39,7 +39,7 @@ require ( github.com/spf13/pflag v1.0.3 // indirect github.com/tinylib/msgp v1.1.0 // indirect github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 // indirect - go.etcd.io/etcd v0.0.0-20190823073701-67d0c21bb04c + go.etcd.io/etcd v0.5.0-alpha.5.0.20190917205325-a14579fbfb1a golang.org/x/crypto v0.0.0-20191002192127-34f69633bfdc golang.org/x/net v0.0.0-20191003171128-d98b1b443823 // indirect golang.org/x/sys v0.0.0-20191003212358-c178f38b412c @@ -59,5 +59,4 @@ require ( replace ( github.com/Azure/go-autorest => github.com/Azure/go-autorest v13.0.0+incompatible github.com/miekg/dns v1.1.3 => github.com/miekg/dns v1.1.22 - golang.org/x/net v0.0.0-20190813000000-74dc4d7220e7 => golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297 ) diff --git a/go.sum b/go.sum index bebf560cc34..101f1d54125 100644 --- a/go.sum +++ b/go.sum @@ -180,6 +180,7 @@ github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpO github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.7 h1:Y+UAYTZ7gDEuOfhxKWy+dvb5dRQ6rJjFSdX2HZY1/gI= github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/infobloxopen/go-trees v0.0.0-20190313150506-2af4e13f9062 h1:d3VSuNcgTCn21dNMm8g412Fck/XWFmMj4nJhhHT7ZZ0= github.com/infobloxopen/go-trees v0.0.0-20190313150506-2af4e13f9062/go.mod h1:PcNJqIlcX/dj3DTG/+QQnRvSgTMG6CLpRMjWcv4+J6w= @@ -288,6 
+289,7 @@ github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6Mwd github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/cobra v0.0.3 h1:ZlrZ4XsMRm04Fr5pSFxBgfND2EBVa1nLpiy1stUsX/8= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= @@ -307,8 +309,8 @@ github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5 github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/etcd v0.0.0-20190823073701-67d0c21bb04c h1:cmCkyKubPxaCrx7tK7JZbgVkzLIxUgsZyfkvnCUgs08= -go.etcd.io/etcd v0.0.0-20190823073701-67d0c21bb04c/go.mod h1:tQYIqsNuGzkF9ncfEtoEX0qkoBhzw6ih5N1xcdGnvek= +go.etcd.io/etcd v0.5.0-alpha.5.0.20190917205325-a14579fbfb1a h1:kw8SHTZWndtiiC6ht2gBebCOGycQHLGERawMZljmbAY= +go.etcd.io/etcd v0.5.0-alpha.5.0.20190917205325-a14579fbfb1a/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= @@ -357,8 +359,7 @@ golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 
golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297 h1:k7pJ2yAPLPgbskkFdhRCsA77k2fySZ1zf2zCjvQCiIM= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190923162816-aa69164e4478 h1:l5EDrHhldLYb3ZRHDUhXF7Om7MvYXnkV9/iQNo1lX6g= golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191003171128-d98b1b443823 h1:Ypyv6BNJh07T1pUSrehkLemqPKXhus2MkfktJ91kRh4= @@ -394,6 +395,7 @@ golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0 h1:HyfiK1WMnHj5FXFXatD+Qs1A/ golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3 h1:4y9KwBHBgBNwDbtu44R5o1fdOCQUEXhbk/P4A9WmJq0= golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe h1:6fAMxZRR6sl1Uq8U61gxU+kPTs2tR8uOySCbBP7BN/M= golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -445,8 +447,7 @@ google.golang.org/genproto v0.0.0-20190701230453-710ae3a149df/go.mod h1:z3L6/3dT google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod 
h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.23.0 h1:AzbTB6ux+okLTzP8Ru1Xs41C303zdcfEht7MQnYJt5A= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.24.0 h1:vb/1TCsVn3DcJlQ0Gs1yB1pKI6Do2/QNwxdKqmc/b0s= google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= gopkg.in/DataDog/dd-trace-go.v1 v1.18.0 h1:n0pmVs3TxCRC9Jrd1/aNaB8roW5ZHMw0UOWPxAoUkkI= From f1415e85738911700d6cbd5156f52cef77404ae0 Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Sat, 5 Oct 2019 10:48:51 +0100 Subject: [PATCH 234/324] circle CI: run with latest Go (#3358) * circle CI: run with latest Go We compile coredns with latest Go on each release, do this same in Circle-ci Signed-off-by: Miek Gieben --- .circleci/config.yml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index c0735ddc5f1..6bfb2d013b3 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -13,7 +13,8 @@ initWorkingDir: &initWorkingDir GOROOT=$(go env GOROOT) sudo rm -r $(go env GOROOT) sudo mkdir $GOROOT - curl https://dl.google.com/go/go1.12.5.linux-amd64.tar.gz | sudo tar xz -C $GOROOT --strip-components=1 + LATEST=$(curl -s https://golang.org/VERSION?m=text) + curl https://dl.google.com/go/${LATEST}.linux-amd64.tar.gz | sudo tar xz -C $GOROOT --strip-components=1 integrationDefaults: &integrationDefaults machine: @@ -55,7 +56,7 @@ jobs: name: Run Kubernetes tests command: | cd ~/go/src/${CIRCLE_PROJECT_USERNAME}/ci/test/kubernetes - GO111MODULE=on go test -v ./... + go test -v ./... 
workflows: version: 2 From ffe6225ff6b1c55b4d9a89a016c4f1aed98dc026 Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Mon, 7 Oct 2019 16:38:46 +0100 Subject: [PATCH 235/324] kubernetes: brush up README, rename metric (#3360) Other latency metrics have `_duration` in the name change this metric to be in sync with the other ones. Signed-off-by: Miek Gieben --- plugin/cache/README.md | 2 +- plugin/kubernetes/README.md | 16 +++++----- plugin/kubernetes/metrics.go | 3 +- plugin/kubernetes/metrics_test.go | 50 +++++++++++++++---------------- 4 files changed, 35 insertions(+), 36 deletions(-) diff --git a/plugin/cache/README.md b/plugin/cache/README.md index 9f97ba437af..3f0847f26fa 100644 --- a/plugin/cache/README.md +++ b/plugin/cache/README.md @@ -61,7 +61,7 @@ Since shards don't fill up perfectly evenly, evictions will occur before the ent Each shard capacity is equal to the total cache size / number of shards (256). Eviction is random, not TTL based. Entries with 0 TTL will remain in the cache until randomly evicted when the shard reaches capacity. -## Metrics +# Metrics If monitoring is enabled (via the *prometheus* directive) then the following metrics are exported: diff --git a/plugin/kubernetes/README.md b/plugin/kubernetes/README.md index ccbd681d9c3..74de47ed7a0 100644 --- a/plugin/kubernetes/README.md +++ b/plugin/kubernetes/README.md @@ -46,7 +46,6 @@ kubernetes [ZONES...] { } ``` - * `endpoint` specifies the **URL** for a remote k8s API endpoint. If omitted, it will connect to k8s in-cluster using the cluster service account. * `tls` **CERT** **KEY** **CACERT** are the TLS cert, key and the CA cert file names for remote k8s connection. @@ -210,15 +209,15 @@ or the word "any"), then that label will match all values. The labels that acce * multiple wildcards are allowed in a single query, e.g., `A` Request `*.*.svc.zone.` or `SRV` request `*.*.*.*.svc.zone.` For example, wildcards can be used to resolve all Endpoints for a Service as `A` records. 
e.g.: `*.service.ns.svc.myzone.local` will return the Endpoint IPs in the Service `service` in namespace `default`: - ``` + +``` *.service.default.svc.cluster.local. 5 IN A 192.168.10.10 *.service.default.svc.cluster.local. 5 IN A 192.168.25.15 ``` - This response can be randomized using the `loadbalance` plugin ## Metadata -The kubernetes plugin will publish the following metadata, if the _metadata_ +The kubernetes plugin will publish the following metadata, if the *metadata* plugin is also enabled: * kubernetes/endpoint: the endpoint name in the query @@ -232,9 +231,10 @@ plugin is also enabled: ## Metrics -The *kubernetes* plugin exports the following *Prometheus* metrics. -* `coredns_kubernetes_dns_programming_latency_seconds{service_kind}` - exports the -[DNS programming latency SLI](https://github.com/kubernetes/community/blob/master/sig-scalability/slos/dns_programming_latency.md). +If monitoring is enabled (via the *prometheus* directive) then the following metrics are exported: + +* `coredns_kubernetes_dns_programming_duration_seconds{service_kind}` - Exports the + [DNS programming latency SLI](https://github.com/kubernetes/community/blob/master/sig-scalability/slos/dns_programming_latency.md). The metrics has the `service_kind` label that identifies the kind of the [kubernetes service](https://kubernetes.io/docs/concepts/services-networking/service). It may take one of the three values: @@ -244,4 +244,4 @@ The *kubernetes* plugin exports the following *Prometheus* metrics. ## Bugs - * add support for other service types; only "headless_with_selector" is supported now +The duration metric only supports the "headless_with_selector" service currently. 
diff --git a/plugin/kubernetes/metrics.go b/plugin/kubernetes/metrics.go index e85b4812014..5547add89cc 100644 --- a/plugin/kubernetes/metrics.go +++ b/plugin/kubernetes/metrics.go @@ -28,7 +28,7 @@ var ( DnsProgrammingLatency = prometheus.NewHistogramVec(prometheus.HistogramOpts{ Namespace: plugin.Namespace, Subsystem: subsystem, - Name: "dns_programming_latency_seconds", + Name: "dns_programming_duration_seconds", // From 1 millisecond to ~17 minutes. Buckets: prometheus.ExponentialBuckets(0.001, 2, 20), Help: "Histogram of the time (in seconds) it took to program a dns instance.", @@ -72,5 +72,4 @@ func recordDNSProgrammingLatency(svcs []*object.Service, endpoints *api.Endpoint // "headless service with selector". DnsProgrammingLatency.WithLabelValues("headless_with_selector"). Observe(durationSinceFunc(lastChangeTriggerTime).Seconds()) - } diff --git a/plugin/kubernetes/metrics_test.go b/plugin/kubernetes/metrics_test.go index b9d3a9509a5..96039c62f45 100644 --- a/plugin/kubernetes/metrics_test.go +++ b/plugin/kubernetes/metrics_test.go @@ -58,31 +58,31 @@ func TestDnsProgrammingLatency(t *testing.T) { controller.Stop() expected := ` - # HELP coredns_kubernetes_dns_programming_latency_seconds Histogram of the time (in seconds) it took to program a dns instance. 
- # TYPE coredns_kubernetes_dns_programming_latency_seconds histogram - coredns_kubernetes_dns_programming_latency_seconds_bucket{service_kind="headless_with_selector",le="0.001"} 0 - coredns_kubernetes_dns_programming_latency_seconds_bucket{service_kind="headless_with_selector",le="0.002"} 0 - coredns_kubernetes_dns_programming_latency_seconds_bucket{service_kind="headless_with_selector",le="0.004"} 0 - coredns_kubernetes_dns_programming_latency_seconds_bucket{service_kind="headless_with_selector",le="0.008"} 0 - coredns_kubernetes_dns_programming_latency_seconds_bucket{service_kind="headless_with_selector",le="0.016"} 0 - coredns_kubernetes_dns_programming_latency_seconds_bucket{service_kind="headless_with_selector",le="0.032"} 0 - coredns_kubernetes_dns_programming_latency_seconds_bucket{service_kind="headless_with_selector",le="0.064"} 0 - coredns_kubernetes_dns_programming_latency_seconds_bucket{service_kind="headless_with_selector",le="0.128"} 0 - coredns_kubernetes_dns_programming_latency_seconds_bucket{service_kind="headless_with_selector",le="0.256"} 0 - coredns_kubernetes_dns_programming_latency_seconds_bucket{service_kind="headless_with_selector",le="0.512"} 0 - coredns_kubernetes_dns_programming_latency_seconds_bucket{service_kind="headless_with_selector",le="1.024"} 1 - coredns_kubernetes_dns_programming_latency_seconds_bucket{service_kind="headless_with_selector",le="2.048"} 2 - coredns_kubernetes_dns_programming_latency_seconds_bucket{service_kind="headless_with_selector",le="4.096"} 2 - coredns_kubernetes_dns_programming_latency_seconds_bucket{service_kind="headless_with_selector",le="8.192"} 2 - coredns_kubernetes_dns_programming_latency_seconds_bucket{service_kind="headless_with_selector",le="16.384"} 2 - coredns_kubernetes_dns_programming_latency_seconds_bucket{service_kind="headless_with_selector",le="32.768"} 2 - coredns_kubernetes_dns_programming_latency_seconds_bucket{service_kind="headless_with_selector",le="65.536"} 2 - 
coredns_kubernetes_dns_programming_latency_seconds_bucket{service_kind="headless_with_selector",le="131.072"} 2 - coredns_kubernetes_dns_programming_latency_seconds_bucket{service_kind="headless_with_selector",le="262.144"} 2 - coredns_kubernetes_dns_programming_latency_seconds_bucket{service_kind="headless_with_selector",le="524.288"} 2 - coredns_kubernetes_dns_programming_latency_seconds_bucket{service_kind="headless_with_selector",le="+Inf"} 2 - coredns_kubernetes_dns_programming_latency_seconds_sum{service_kind="headless_with_selector"} 3 - coredns_kubernetes_dns_programming_latency_seconds_count{service_kind="headless_with_selector"} 2 + # HELP coredns_kubernetes_dns_programming_duration_seconds Histogram of the time (in seconds) it took to program a dns instance. + # TYPE coredns_kubernetes_dns_programming_duration_seconds histogram + coredns_kubernetes_dns_programming_duration_seconds_bucket{service_kind="headless_with_selector",le="0.001"} 0 + coredns_kubernetes_dns_programming_duration_seconds_bucket{service_kind="headless_with_selector",le="0.002"} 0 + coredns_kubernetes_dns_programming_duration_seconds_bucket{service_kind="headless_with_selector",le="0.004"} 0 + coredns_kubernetes_dns_programming_duration_seconds_bucket{service_kind="headless_with_selector",le="0.008"} 0 + coredns_kubernetes_dns_programming_duration_seconds_bucket{service_kind="headless_with_selector",le="0.016"} 0 + coredns_kubernetes_dns_programming_duration_seconds_bucket{service_kind="headless_with_selector",le="0.032"} 0 + coredns_kubernetes_dns_programming_duration_seconds_bucket{service_kind="headless_with_selector",le="0.064"} 0 + coredns_kubernetes_dns_programming_duration_seconds_bucket{service_kind="headless_with_selector",le="0.128"} 0 + coredns_kubernetes_dns_programming_duration_seconds_bucket{service_kind="headless_with_selector",le="0.256"} 0 + coredns_kubernetes_dns_programming_duration_seconds_bucket{service_kind="headless_with_selector",le="0.512"} 0 + 
coredns_kubernetes_dns_programming_duration_seconds_bucket{service_kind="headless_with_selector",le="1.024"} 1 + coredns_kubernetes_dns_programming_duration_seconds_bucket{service_kind="headless_with_selector",le="2.048"} 2 + coredns_kubernetes_dns_programming_duration_seconds_bucket{service_kind="headless_with_selector",le="4.096"} 2 + coredns_kubernetes_dns_programming_duration_seconds_bucket{service_kind="headless_with_selector",le="8.192"} 2 + coredns_kubernetes_dns_programming_duration_seconds_bucket{service_kind="headless_with_selector",le="16.384"} 2 + coredns_kubernetes_dns_programming_duration_seconds_bucket{service_kind="headless_with_selector",le="32.768"} 2 + coredns_kubernetes_dns_programming_duration_seconds_bucket{service_kind="headless_with_selector",le="65.536"} 2 + coredns_kubernetes_dns_programming_duration_seconds_bucket{service_kind="headless_with_selector",le="131.072"} 2 + coredns_kubernetes_dns_programming_duration_seconds_bucket{service_kind="headless_with_selector",le="262.144"} 2 + coredns_kubernetes_dns_programming_duration_seconds_bucket{service_kind="headless_with_selector",le="524.288"} 2 + coredns_kubernetes_dns_programming_duration_seconds_bucket{service_kind="headless_with_selector",le="+Inf"} 2 + coredns_kubernetes_dns_programming_duration_seconds_sum{service_kind="headless_with_selector"} 3 + coredns_kubernetes_dns_programming_duration_seconds_count{service_kind="headless_with_selector"} 2 ` if err := testutil.CollectAndCompare(DnsProgrammingLatency, strings.NewReader(expected)); err != nil { t.Error(err) From 65458b2de24bd748e2983e34c779f5b04d38f20c Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Tue, 8 Oct 2019 10:20:48 +0100 Subject: [PATCH 236/324] Directive -> plugin (#3363) Caught my eye, we name things directive still, esp when talking about the prometheus *plugin*. Rename everything that needs to be plugin to 'plugin'. Also make sure Metrics is a H2 section (not H1). 
Signed-off-by: Miek Gieben --- plugin/autopath/README.md | 2 +- plugin/bind/README.md | 2 +- plugin/cache/README.md | 4 ++-- plugin/dnssec/README.md | 2 +- plugin/file/README.md | 4 ++-- plugin/forward/README.md | 2 +- plugin/grpc/README.md | 2 +- plugin/health/README.md | 2 +- plugin/hosts/README.md | 4 ++-- plugin/import/README.md | 2 +- plugin/kubernetes/README.md | 4 ++-- plugin/reload/README.md | 2 +- plugin/sign/README.md | 4 ++-- plugin/template/README.md | 2 +- plugin/tls/README.md | 2 +- 15 files changed, 20 insertions(+), 20 deletions(-) diff --git a/plugin/autopath/README.md b/plugin/autopath/README.md index 96b0e5beb90..426151ce7b6 100644 --- a/plugin/autopath/README.md +++ b/plugin/autopath/README.md @@ -27,7 +27,7 @@ If a plugin implements the `AutoPather` interface then it can be used. ## Metrics -If monitoring is enabled (via the *prometheus* directive) then the following metric is exported: +If monitoring is enabled (via the *prometheus* plugin) then the following metric is exported: * `coredns_autopath_success_count_total{server}` - counter of successfully autopath-ed queries. diff --git a/plugin/bind/README.md b/plugin/bind/README.md index 8561d0b3c60..fec5511aba3 100644 --- a/plugin/bind/README.md +++ b/plugin/bind/README.md @@ -40,7 +40,7 @@ To allow processing DNS requests only local host on both IPv4 and IPv6 stacks, u } ~~~ -If the configuration comes up with several *bind* directives, all addresses are consolidated together: +If the configuration comes up with several *bind* plugins, all addresses are consolidated together: The following sample is equivalent to the preceding: ~~~ corefile diff --git a/plugin/cache/README.md b/plugin/cache/README.md index 3f0847f26fa..2958f90c93d 100644 --- a/plugin/cache/README.md +++ b/plugin/cache/README.md @@ -61,9 +61,9 @@ Since shards don't fill up perfectly evenly, evictions will occur before the ent Each shard capacity is equal to the total cache size / number of shards (256). 
Eviction is random, not TTL based. Entries with 0 TTL will remain in the cache until randomly evicted when the shard reaches capacity. -# Metrics +## Metrics -If monitoring is enabled (via the *prometheus* directive) then the following metrics are exported: +If monitoring is enabled (via the *prometheus* plugin) then the following metrics are exported: * `coredns_cache_size{server, type}` - Total elements in the cache by cache type. * `coredns_cache_hits_total{server, type}` - Counter of cache hits by cache type. diff --git a/plugin/dnssec/README.md b/plugin/dnssec/README.md index abb222dd786..ef4eb7b5d39 100644 --- a/plugin/dnssec/README.md +++ b/plugin/dnssec/README.md @@ -51,7 +51,7 @@ used (See [bugs](#bugs)). ## Metrics -If monitoring is enabled (via the *prometheus* directive) then the following metrics are exported: +If monitoring is enabled (via the *prometheus* plugin) then the following metrics are exported: * `coredns_dnssec_cache_size{server, type}` - total elements in the cache, type is "signature". * `coredns_dnssec_cache_hits_total{server}` - Counter of cache hits. diff --git a/plugin/file/README.md b/plugin/file/README.md index 58cfe9586ff..e80b6b0dccd 100644 --- a/plugin/file/README.md +++ b/plugin/file/README.md @@ -18,9 +18,9 @@ file DBFILE [ZONES...] ~~~ * **DBFILE** the database file to read and parse. If the path is relative, the path from the *root* - directive will be prepended to it. + plugin will be prepended to it. * **ZONES** zones it should be authoritative for. If empty, the zones from the configuration block - are used. + are used. If you want to round-robin A and AAAA responses look at the *loadbalance* plugin. 
diff --git a/plugin/forward/README.md b/plugin/forward/README.md index fd00253ddcb..f9c2f482b56 100644 --- a/plugin/forward/README.md +++ b/plugin/forward/README.md @@ -94,7 +94,7 @@ On each endpoint, the timeouts of the communication are set by default and autom ## Metrics -If monitoring is enabled (via the *prometheus* directive) then the following metric are exported: +If monitoring is enabled (via the *prometheus* plugin) then the following metric are exported: * `coredns_forward_request_duration_seconds{to}` - duration per upstream interaction. * `coredns_forward_request_count_total{to}` - query count per upstream. diff --git a/plugin/grpc/README.md b/plugin/grpc/README.md index 09b499bb7d3..36314232a3d 100644 --- a/plugin/grpc/README.md +++ b/plugin/grpc/README.md @@ -60,7 +60,7 @@ Also note the TLS config is "global" for the whole grpc proxy if you need a diff ## Metrics -If monitoring is enabled (via the *prometheus* directive) then the following metric are exported: +If monitoring is enabled (via the *prometheus* plugin) then the following metric are exported: * `coredns_grpc_request_duration_seconds{to}` - duration per upstream interaction. * `coredns_grpc_request_count_total{to}` - query count per upstream. diff --git a/plugin/health/README.md b/plugin/health/README.md index e6fc56e80e0..d4228d60019 100644 --- a/plugin/health/README.md +++ b/plugin/health/README.md @@ -48,7 +48,7 @@ Doing this is supported but both endpoints ":8080" and ":8081" will export the e ## Metrics -If monitoring is enabled (via the *prometheus* directive) then the following metric is exported: +If monitoring is enabled (via the *prometheus* plugin) then the following metric is exported: * `coredns_health_request_duration_seconds{}` - duration to process a HTTP query to the local `/health` endpoint. As this a local operation it should be fast. 
A (large) increase in this diff --git a/plugin/hosts/README.md b/plugin/hosts/README.md index 270a8200bdc..7cfe8e933c3 100644 --- a/plugin/hosts/README.md +++ b/plugin/hosts/README.md @@ -55,7 +55,7 @@ hosts [FILE [ZONES...]] { ~~~ * **FILE** the hosts file to read and parse. If the path is relative the path from the *root* - directive will be prepended to it. Defaults to /etc/hosts if omitted. We scan the file for changes + plugin will be prepended to it. Defaults to /etc/hosts if omitted. We scan the file for changes every 5 seconds. * **ZONES** zones it should be authoritative for. If empty, the zones from the configuration block are used. @@ -74,7 +74,7 @@ hosts [FILE [ZONES...]] { ## Metrics -If monitoring is enabled (via the *prometheus* directive) then the following metrics are exported: +If monitoring is enabled (via the *prometheus* plugin) then the following metrics are exported: - `coredns_hosts_entries_count{}` - The combined number of entries in hosts and Corefile. - `coredns_hosts_reload_timestamp_seconds{}` - The timestamp of the last reload of hosts file. diff --git a/plugin/import/README.md b/plugin/import/README.md index 76906d9fa4b..9da6e250a45 100644 --- a/plugin/import/README.md +++ b/plugin/import/README.md @@ -9,7 +9,7 @@ The *import* plugin can be used to include files into the main configuration. Another use is to reference predefined snippets. Both can help to avoid some duplication. -This is a unique directive in that *import* can appear outside of a server block. In other words, it +This is a unique plugin in that *import* can appear outside of a server block. In other words, it can appear at the top of a Corefile where an address would normally be. ## Syntax diff --git a/plugin/kubernetes/README.md b/plugin/kubernetes/README.md index 74de47ed7a0..fd3a60b17f6 100644 --- a/plugin/kubernetes/README.md +++ b/plugin/kubernetes/README.md @@ -24,7 +24,7 @@ This plugin can only be used once per Server Block. kubernetes [ZONES...] 
~~~ -With only the directive specified, the *kubernetes* plugin will default to the zone specified in +With only the plugin specified, the *kubernetes* plugin will default to the zone specified in the server's block. It will handle all queries in that zone and connect to Kubernetes in-cluster. It will not provide PTR records for services or A records for pods. If **ZONES** is used it specifies all the zones the plugin should be authoritative for. @@ -231,7 +231,7 @@ plugin is also enabled: ## Metrics -If monitoring is enabled (via the *prometheus* directive) then the following metrics are exported: +If monitoring is enabled (via the *prometheus* plugin) then the following metrics are exported: * `coredns_kubernetes_dns_programming_duration_seconds{service_kind}` - Exports the [DNS programming latency SLI](https://github.com/kubernetes/community/blob/master/sig-scalability/slos/dns_programming_latency.md). diff --git a/plugin/reload/README.md b/plugin/reload/README.md index f50116cf437..77e720eacc9 100644 --- a/plugin/reload/README.md +++ b/plugin/reload/README.md @@ -96,7 +96,7 @@ CoreDNS v1.7.0 and later does parse the Corefile and supports detecting changes ## Metrics - If monitoring is enabled (via the *prometheus* directive) then the following metric is exported: + If monitoring is enabled (via the *prometheus* plugin) then the following metric is exported: * `coredns_reload_failed_count_total{}` - counts the number of failed reload attempts. diff --git a/plugin/sign/README.md b/plugin/sign/README.md index a28bf90d1e9..d2782816ae8 100644 --- a/plugin/sign/README.md +++ b/plugin/sign/README.md @@ -62,7 +62,7 @@ sign DBFILE [ZONES...] { ~~~ * **DBFILE** the zone database file to read and parse. If the path is relative, the path from the - *root* directive will be prepended to it. + *root* plugin will be prepended to it. * **ZONES** zones it should be sign for. If empty, the zones from the configuration block are used. 
* `key` specifies the key(s) (there can be multiple) to sign the zone. If `file` is @@ -71,7 +71,7 @@ sign DBFILE [ZONES...] { *ignored*. These keys must also be Key Signing Keys (KSK). * `directory` specifies the **DIR** where CoreDNS should save zones that have been signed. If not given this defaults to `/var/lib/coredns`. The zones are saved under the name - `db..signed`. If the path is relative the path from the *root* directive will be prepended + `db..signed`. If the path is relative the path from the *root* plugin will be prepended to it. Keys can be generated with `coredns-keygen`, to create one for use in the *sign* plugin, use: diff --git a/plugin/template/README.md b/plugin/template/README.md index 64af37f9311..654de86c9f0 100644 --- a/plugin/template/README.md +++ b/plugin/template/README.md @@ -59,7 +59,7 @@ The output of the template must be a [RFC 1035](https://tools.ietf.org/html/rfc1 ## Metrics -If monitoring is enabled (via the *prometheus* directive) then the following metrics are exported: +If monitoring is enabled (via the *prometheus* plugin) then the following metrics are exported: * `coredns_template_matches_total{server, regex}` the total number of matched requests by regex. * `coredns_template_template_failures_total{server, regex,section,template}` the number of times the Go templating failed. Regex, section and template label values can be used to map the error back to the config file. diff --git a/plugin/tls/README.md b/plugin/tls/README.md index 82d059adeec..40b395c6b8b 100644 --- a/plugin/tls/README.md +++ b/plugin/tls/README.md @@ -11,7 +11,7 @@ or are using gRPC (https://grpc.io/, not an IETF standard). Normally DNS traffic all (DNSSEC only signs resource records). The *tls* "plugin" allows you to configure the cryptographic keys that are needed for both -DNS-over-TLS and DNS-over-gRPC. If the `tls` directive is omitted, then no encryption takes place. +DNS-over-TLS and DNS-over-gRPC. 
If the *tls* plugin is omitted, then no encryption takes place. The gRPC protobuffer is defined in `pb/dns.proto`. It defines the proto as a simple wrapper for the wire data of a DNS message. From bf356e79fc215b5c455fcaf461622d34f64e4e47 Mon Sep 17 00:00:00 2001 From: yuxiaobo96 <41496192+yuxiaobo96@users.noreply.github.com> Date: Wed, 9 Oct 2019 15:24:18 +0800 Subject: [PATCH 237/324] fix spelling mistakes (#3364) Automatically submitted. --- coredns.1.md | 2 +- plugin/acl/README.md | 2 +- plugin/acl/acl.go | 2 +- plugin/forward/persistent.go | 4 ++-- plugin/route53/route53.go | 2 +- plugin/route53/route53_test.go | 2 +- plugin/sign/sign.go | 2 +- 7 files changed, 8 insertions(+), 8 deletions(-) diff --git a/coredns.1.md b/coredns.1.md index 4784e969727..cb56bc7081b 100644 --- a/coredns.1.md +++ b/coredns.1.md @@ -1,6 +1,6 @@ ## CoreDNS -*coredns* - plugable DNS nameserver optimized for service discovery and flexibility. +*coredns* - pluggable DNS nameserver optimized for service discovery and flexibility. ## Synopsis diff --git a/plugin/acl/README.md b/plugin/acl/README.md index fe5582a6af3..9a964db2190 100644 --- a/plugin/acl/README.md +++ b/plugin/acl/README.md @@ -4,7 +4,7 @@ ## Description -With `acl` enabled, users are able to block suspicous DNS queries by configuring IP filter rule sets, i.e. allowing authorized queries to recurse or blocking unauthorized queries. +With `acl` enabled, users are able to block suspicious DNS queries by configuring IP filter rule sets, i.e. allowing authorized queries to recurse or blocking unauthorized queries. This plugin can be used multiple times per Server Block. diff --git a/plugin/acl/acl.go b/plugin/acl/acl.go index 2eea0a27c55..ce7b041cbb1 100644 --- a/plugin/acl/acl.go +++ b/plugin/acl/acl.go @@ -81,7 +81,7 @@ RulesCheckLoop: } // matchWithPolicies matches the DNS query with a list of ACL polices and returns suitable -// action agains the query. +// action against the query. 
func matchWithPolicies(policies []policy, w dns.ResponseWriter, r *dns.Msg) action { state := request.Request{W: w, Req: r} diff --git a/plugin/forward/persistent.go b/plugin/forward/persistent.go index 7dc01326bc7..afa95d343cd 100644 --- a/plugin/forward/persistent.go +++ b/plugin/forward/persistent.go @@ -118,14 +118,14 @@ func (t *Transport) cleanup(all bool) { } } -// It is hard to pin a value to this, the import thing is to no block forever, loosing at cached connection is not terrible. +// It is hard to pin a value to this, the import thing is to no block forever, losing at cached connection is not terrible. const yieldTimeout = 25 * time.Millisecond // Yield return the connection to transport for reuse. func (t *Transport) Yield(pc *persistConn) { pc.used = time.Now() // update used time - // Make ths non-blocking, because in the case of a very busy forwarder we will *block* on this yield. This + // Make this non-blocking, because in the case of a very busy forwarder we will *block* on this yield. This // blocks the outer go-routine and stuff will just pile up. We timeout when the send fails to as returning // these connection is an optimization anyway. select { diff --git a/plugin/route53/route53.go b/plugin/route53/route53.go index 644f7b43184..c86d5f0f587 100644 --- a/plugin/route53/route53.go +++ b/plugin/route53/route53.go @@ -193,7 +193,7 @@ func maybeUnescape(s string) (string, error) { case r >= rune('0') && r <= rune('9'): case r == rune('*'): if out != "" { - return "", errors.New("`*' ony supported as wildcard (leftmost label)") + return "", errors.New("`*' only supported as wildcard (leftmost label)") } case r == rune('-'): case r == rune('.'): diff --git a/plugin/route53/route53_test.go b/plugin/route53/route53_test.go index a4d71fbff48..aa5c82b9c43 100644 --- a/plugin/route53/route53_test.go +++ b/plugin/route53/route53_test.go @@ -285,7 +285,7 @@ func TestMaybeUnescape(t *testing.T) { // 3. Escaped dot, 'a' and a hyphen. 
No idea why but we'll allow it. {escaped: `weird\\055ex\\141mple\\056com\\056\\056`, want: "weird-example.com.."}, // 4. escaped `*` in the middle - NOT OK. - {escaped: `e\\052ample.com`, wantErr: errors.New("`*' ony supported as wildcard (leftmost label)")}, + {escaped: `e\\052ample.com`, wantErr: errors.New("`*' only supported as wildcard (leftmost label)")}, // 5. Invalid character. {escaped: `\\000.example.com`, wantErr: errors.New(`invalid character: \\000`)}, // 6. Invalid escape sequence in the middle. diff --git a/plugin/sign/sign.go b/plugin/sign/sign.go index ebe4522e2b6..f5eb9afacd8 100644 --- a/plugin/sign/sign.go +++ b/plugin/sign/sign.go @@ -31,7 +31,7 @@ const ( DurationSignatureExpireDays = 32 * 24 * time.Hour // sign for 32 days DurationRefreshHours = 5 * time.Hour // check zones every 5 hours DurationJitter = -18 * time.Hour // default max jitter - DurationSignatureInceptionHours = -3 * time.Hour // -(2+1) hours, be sure to catch daylight saving time and such, jitter is substracted + DurationSignatureInceptionHours = -3 * time.Hour // -(2+1) hours, be sure to catch daylight saving time and such, jitter is subtracted ) const timeFmt = "2006-01-02T15:04:05.000Z07:00" From 1820c71f28b3f0f26d97eef1dba41ff40dcaf15b Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Thu, 10 Oct 2019 07:45:28 +0100 Subject: [PATCH 238/324] Run makefile.doc (#3365) Generate manual pages; no other changes. 
Signed-off-by: Miek Gieben --- man/coredns-acl.7 | 4 ++-- man/coredns-autopath.7 | 4 ++-- man/coredns-bind.7 | 4 ++-- man/coredns-cache.7 | 4 ++-- man/coredns-dnssec.7 | 4 ++-- man/coredns-erratic.7 | 24 ++++++++++-------------- man/coredns-file.7 | 4 ++-- man/coredns-forward.7 | 6 ++---- man/coredns-grpc.7 | 4 ++-- man/coredns-health.7 | 4 ++-- man/coredns-hosts.7 | 6 +++--- man/coredns-import.7 | 4 ++-- man/coredns-kubernetes.7 | 35 ++++++++++++++++++++++++++++++----- man/coredns-reload.7 | 4 ++-- man/coredns-sign.7 | 6 +++--- man/coredns-template.7 | 4 ++-- man/coredns-tls.7 | 4 ++-- man/coredns.1 | 11 ++++------- 18 files changed, 76 insertions(+), 60 deletions(-) diff --git a/man/coredns-acl.7 b/man/coredns-acl.7 index 9a2ded0dd59..15c25af1dc8 100644 --- a/man/coredns-acl.7 +++ b/man/coredns-acl.7 @@ -1,12 +1,12 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-ACL" 7 "September 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-ACL" 7 "October 2019" "CoreDNS" "CoreDNS Plugins" .PP \fIacl\fP - enforces access control policies on source ip and prevents unauthorized access to DNS servers. .SH "DESCRIPTION" .PP -With \fB\fCacl\fR enabled, users are able to block suspicous DNS queries by configuring IP filter rule sets, i.e. allowing authorized queries to recurse or blocking unauthorized queries. +With \fB\fCacl\fR enabled, users are able to block suspicious DNS queries by configuring IP filter rule sets, i.e. allowing authorized queries to recurse or blocking unauthorized queries. .PP This plugin can be used multiple times per Server Block. 
diff --git a/man/coredns-autopath.7 b/man/coredns-autopath.7 index 4c3304b2904..ce5711bb833 100644 --- a/man/coredns-autopath.7 +++ b/man/coredns-autopath.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-AUTOPATH" 7 "September 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-AUTOPATH" 7 "October 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP @@ -36,7 +36,7 @@ If a plugin implements the \fB\fCAutoPather\fR interface then it can be used. .SH "METRICS" .PP -If monitoring is enabled (via the \fIprometheus\fP directive) then the following metric is exported: +If monitoring is enabled (via the \fIprometheus\fP plugin) then the following metric is exported: .IP \(bu 4 \fB\fCcoredns_autopath_success_count_total{server}\fR - counter of successfully autopath-ed queries. diff --git a/man/coredns-bind.7 b/man/coredns-bind.7 index 92a098962f7..942536b0f90 100644 --- a/man/coredns-bind.7 +++ b/man/coredns-bind.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-BIND" 7 "August 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-BIND" 7 "October 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP @@ -60,7 +60,7 @@ To allow processing DNS requests only local host on both IPv4 and IPv6 stacks, u .RE .PP -If the configuration comes up with several \fIbind\fP directives, all addresses are consolidated together: +If the configuration comes up with several \fIbind\fP plugins, all addresses are consolidated together: The following sample is equivalent to the preceding: .PP diff --git a/man/coredns-cache.7 b/man/coredns-cache.7 index 1707f65b6d7..5481ced31d7 100644 --- a/man/coredns-cache.7 +++ b/man/coredns-cache.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-CACHE" 7 "September 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-CACHE" 7 "October 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP @@ -85,7 +85,7 @@ Entries with 0 TTL will remain in the cache until randomly 
evicted when the shar .SH "METRICS" .PP -If monitoring is enabled (via the \fIprometheus\fP directive) then the following metrics are exported: +If monitoring is enabled (via the \fIprometheus\fP plugin) then the following metrics are exported: .IP \(bu 4 \fB\fCcoredns_cache_size{server, type}\fR - Total elements in the cache by cache type. diff --git a/man/coredns-dnssec.7 b/man/coredns-dnssec.7 index df17d282668..07c096110ff 100644 --- a/man/coredns-dnssec.7 +++ b/man/coredns-dnssec.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-DNSSEC" 7 "September 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-DNSSEC" 7 "October 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP @@ -69,7 +69,7 @@ RRSIGs. The default for \fBCAPACITY\fP is 10000. .SH "METRICS" .PP -If monitoring is enabled (via the \fIprometheus\fP directive) then the following metrics are exported: +If monitoring is enabled (via the \fIprometheus\fP plugin) then the following metrics are exported: .IP \(bu 4 \fB\fCcoredns_dnssec_cache_size{server, type}\fR - total elements in the cache, type is "signature". diff --git a/man/coredns-erratic.7 b/man/coredns-erratic.7 index 61688ef9d78..8c97fc00901 100644 --- a/man/coredns-erratic.7 +++ b/man/coredns-erratic.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-ERRATIC" 7 "September 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-ERRATIC" 7 "October 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP @@ -7,18 +7,14 @@ .SH "DESCRIPTION" .PP -\fIerratic\fP returns a static response to all queries, but the responses can be delayed, dropped or truncated. -The \fIerratic\fP plugin will respond to every A or AAAA query. For any other type it will return -a SERVFAIL response. 
The reply for A will return 192.0.2.53 (see RFC -5737 -\[la]https://tools.ietf.org/html/rfc5737\[ra], -for AAAA it returns 2001:DB8::53 (see RFC 3849 -\[la]https://tools.ietf.org/html/rfc3849\[ra]) and for an -AXFR request it will respond with a small zone transfer. - -.PP -\fIerratic\fP can also be used in conjunction with the \fIautopath\fP plugin. This is mostly to aid in -testing. +\fIerratic\fP returns a static response to all queries, but the responses can be delayed, +dropped or truncated. The \fIerratic\fP plugin will respond to every A or AAAA query. For +any other type it will return a SERVFAIL response (except AXFR). The reply for A will return +192.0.2.53 (RFC 5737 +\[la]https://tools.ietf.org/html/rfc5737\[ra]), for AAAA it returns 2001:DB8::53 (RFC +3849 +\[la]https://tools.ietf.org/html/rfc3849\[ra]). For an AXFR request it will respond with a small +zone transfer. .SH "SYNTAX" .PP @@ -65,7 +61,7 @@ example.org { .RE .PP -Or even shorter if the defaults suits you. Note this only drops queries, it does not delay them. +Or even shorter if the defaults suit you. Note this only drops queries, it does not delay them. .PP .RS diff --git a/man/coredns-file.7 b/man/coredns-file.7 index bacd63c7c12..6bf36036939 100644 --- a/man/coredns-file.7 +++ b/man/coredns-file.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-FILE" 7 "September 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-FILE" 7 "October 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP @@ -24,7 +24,7 @@ file DBFILE [ZONES...] .IP \(bu 4 \fBDBFILE\fP the database file to read and parse. If the path is relative, the path from the \fIroot\fP -directive will be prepended to it. +plugin will be prepended to it. .IP \(bu 4 \fBZONES\fP zones it should be authoritative for. If empty, the zones from the configuration block are used. 
diff --git a/man/coredns-forward.7 b/man/coredns-forward.7 index 8efabc22e9c..3bb51dc4b74 100644 --- a/man/coredns-forward.7 +++ b/man/coredns-forward.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-FORWARD" 7 "September 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-FORWARD" 7 "October 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP @@ -142,7 +142,7 @@ readTimeout by default is 2 sec, and can decrease automatically down to 200ms .SH "METRICS" .PP -If monitoring is enabled (via the \fIprometheus\fP directive) then the following metric are exported: +If monitoring is enabled (via the \fIprometheus\fP plugin) then the following metric are exported: .IP \(bu 4 \fB\fCcoredns_forward_request_duration_seconds{to}\fR - duration per upstream interaction. @@ -155,8 +155,6 @@ If monitoring is enabled (via the \fIprometheus\fP directive) then the following .IP \(bu 4 \fB\fCcoredns_forward_healthcheck_broken_count_total{}\fR - counter of when all upstreams are unhealthy, and we are randomly (this always uses the \fB\fCrandom\fR policy) spraying to an upstream. -.IP \(bu 4 -\fB\fCcoredns_forward_socket_count_total{to}\fR - number of cached sockets per upstream. .PP diff --git a/man/coredns-grpc.7 b/man/coredns-grpc.7 index bcf3b469bb1..be33ab4988b 100644 --- a/man/coredns-grpc.7 +++ b/man/coredns-grpc.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-GRPC" 7 "September 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-GRPC" 7 "October 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP @@ -90,7 +90,7 @@ Also note the TLS config is "global" for the whole grpc proxy if you need a diff .SH "METRICS" .PP -If monitoring is enabled (via the \fIprometheus\fP directive) then the following metric are exported: +If monitoring is enabled (via the \fIprometheus\fP plugin) then the following metric are exported: .IP \(bu 4 \fB\fCcoredns_grpc_request_duration_seconds{to}\fR - duration per upstream interaction. 
diff --git a/man/coredns-health.7 b/man/coredns-health.7 index ecae0d7e2e8..adbbf4ea9c1 100644 --- a/man/coredns-health.7 +++ b/man/coredns-health.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-HEALTH" 7 "September 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-HEALTH" 7 "October 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP @@ -69,7 +69,7 @@ Doing this is supported but both endpoints ":8080" and ":8081" will export the e .SH "METRICS" .PP -If monitoring is enabled (via the \fIprometheus\fP directive) then the following metric is exported: +If monitoring is enabled (via the \fIprometheus\fP plugin) then the following metric is exported: .IP \(bu 4 \fB\fCcoredns_health_request_duration_seconds{}\fR - duration to process a HTTP query to the local diff --git a/man/coredns-hosts.7 b/man/coredns-hosts.7 index c9cf7e65712..f31a08f3d76 100644 --- a/man/coredns-hosts.7 +++ b/man/coredns-hosts.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-HOSTS" 7 "September 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-HOSTS" 7 "October 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP @@ -68,7 +68,7 @@ hosts [FILE [ZONES...]] { .IP \(bu 4 \fBFILE\fP the hosts file to read and parse. If the path is relative the path from the \fIroot\fP -directive will be prepended to it. Defaults to /etc/hosts if omitted. We scan the file for changes +plugin will be prepended to it. Defaults to /etc/hosts if omitted. We scan the file for changes every 5 seconds. .IP \(bu 4 \fBZONES\fP zones it should be authoritative for. If empty, the zones from the configuration block @@ -95,7 +95,7 @@ queries for those zones will be subject to fallthrough. 
.SH "METRICS" .PP -If monitoring is enabled (via the \fIprometheus\fP directive) then the following metrics are exported: +If monitoring is enabled (via the \fIprometheus\fP plugin) then the following metrics are exported: .IP \(bu 4 \fB\fCcoredns_hosts_entries_count{}\fR - The combined number of entries in hosts and Corefile. diff --git a/man/coredns-import.7 b/man/coredns-import.7 index 918e4a2cd35..ba3dfb52a1e 100644 --- a/man/coredns-import.7 +++ b/man/coredns-import.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-IMPORT" 7 "September 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-IMPORT" 7 "October 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP @@ -11,7 +11,7 @@ The \fIimport\fP plugin can be used to include files into the main configuration reference predefined snippets. Both can help to avoid some duplication. .PP -This is a unique directive in that \fIimport\fP can appear outside of a server block. In other words, it +This is a unique plugin in that \fIimport\fP can appear outside of a server block. In other words, it can appear at the top of a Corefile where an address would normally be. .SH "SYNTAX" diff --git a/man/coredns-kubernetes.7 b/man/coredns-kubernetes.7 index 2c7baf8c3e9..e3b5e317788 100644 --- a/man/coredns-kubernetes.7 +++ b/man/coredns-kubernetes.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-KUBERNETES" 7 "September 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-KUBERNETES" 7 "October 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP @@ -37,7 +37,7 @@ kubernetes [ZONES...] .RE .PP -With only the directive specified, the \fIkubernetes\fP plugin will default to the zone specified in +With only the plugin specified, the \fIkubernetes\fP plugin will default to the zone specified in the server's block. It will handle all queries in that zone and connect to Kubernetes in-cluster. It will not provide PTR records for services or A records for pods. 
If \fBZONES\fP is used it specifies all the zones the plugin should be authoritative for. @@ -308,9 +308,6 @@ For example, wildcards can be used to resolve all Endpoints for a Service as \fB .fi .RE -.PP -This response can be randomized using the \fB\fCloadbalance\fR plugin - .SH "METADATA" .PP The kubernetes plugin will publish the following metadata, if the \fImetadata\fP @@ -334,3 +331,31 @@ kubernetes/client-namespace: the client pod's namespace, if \fB\fCpods verified\ kubernetes/client-pod-name: the client pod's name, if \fB\fCpods verified\fR mode is enabled +.SH "METRICS" +.PP +If monitoring is enabled (via the \fIprometheus\fP plugin) then the following metrics are exported: + +.IP \(bu 4 +\fB\fCcoredns_kubernetes_dns_programming_duration_seconds{service_kind}\fR - Exports the +DNS programming latency SLI +\[la]https://github.com/kubernetes/community/blob/master/sig-scalability/slos/dns_programming_latency.md\[ra]. +The metrics has the \fB\fCservice_kind\fR label that identifies the kind of the +kubernetes service +\[la]https://kubernetes.io/docs/concepts/services-networking/service\[ra]. +It may take one of the three values: + +.RS +.IP \(en 4 +\fB\fCcluster_ip\fR +.IP \(en 4 +\fB\fCheadless_with_selector\fR +.IP \(en 4 +\fB\fCheadless_without_selector\fR + +.RE + + +.SH "BUGS" +.PP +The duration metric only supports the "headless\fIwith\fPselector" service currently. 
+ diff --git a/man/coredns-reload.7 b/man/coredns-reload.7 index 2ff687df91a..a4fdefe8a51 100644 --- a/man/coredns-reload.7 +++ b/man/coredns-reload.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-RELOAD" 7 "September 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-RELOAD" 7 "October 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP @@ -135,7 +135,7 @@ CoreDNS v1.7.0 and later does parse the Corefile and supports detecting changes .SH "METRICS" .PP -If monitoring is enabled (via the \fIprometheus\fP directive) then the following metric is exported: +If monitoring is enabled (via the \fIprometheus\fP plugin) then the following metric is exported: .IP \(bu 4 \fB\fCcoredns_reload_failed_count_total{}\fR - counts the number of failed reload attempts. diff --git a/man/coredns-sign.7 b/man/coredns-sign.7 index 06cdf68a717..4f7d02c2845 100644 --- a/man/coredns-sign.7 +++ b/man/coredns-sign.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-SIGN" 7 "September 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-SIGN" 7 "October 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP @@ -82,7 +82,7 @@ sign DBFILE [ZONES...] { .IP \(bu 4 \fBDBFILE\fP the zone database file to read and parse. If the path is relative, the path from the -\fIroot\fP directive will be prepended to it. +\fIroot\fP plugin will be prepended to it. .IP \(bu 4 \fBZONES\fP zones it should be sign for. If empty, the zones from the configuration block are used. @@ -94,7 +94,7 @@ for \fB\fCK++\fR files. Any metadata in these files (Activate, Pu .IP \(bu 4 \fB\fCdirectory\fR specifies the \fBDIR\fP where CoreDNS should save zones that have been signed. If not given this defaults to \fB\fC/var/lib/coredns\fR. The zones are saved under the name -\fB\fCdb..signed\fR. If the path is relative the path from the \fIroot\fP directive will be prepended +\fB\fCdb..signed\fR. 
If the path is relative the path from the \fIroot\fP plugin will be prepended to it. diff --git a/man/coredns-template.7 b/man/coredns-template.7 index 948f11f449e..8ece38763bc 100644 --- a/man/coredns-template.7 +++ b/man/coredns-template.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-TEMPLATE" 7 "September 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-TEMPLATE" 7 "October 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP @@ -93,7 +93,7 @@ The output of the template must be a RFC 1035 .SH "METRICS" .PP -If monitoring is enabled (via the \fIprometheus\fP directive) then the following metrics are exported: +If monitoring is enabled (via the \fIprometheus\fP plugin) then the following metrics are exported: .IP \(bu 4 \fB\fCcoredns_template_matches_total{server, regex}\fR the total number of matched requests by regex. diff --git a/man/coredns-tls.7 b/man/coredns-tls.7 index 016c09c2d7c..1098a50ec5e 100644 --- a/man/coredns-tls.7 +++ b/man/coredns-tls.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-TLS" 7 "September 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-TLS" 7 "October 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP @@ -14,7 +14,7 @@ all (DNSSEC only signs resource records). .PP The \fItls\fP "plugin" allows you to configure the cryptographic keys that are needed for both -DNS-over-TLS and DNS-over-gRPC. If the \fB\fCtls\fR directive is omitted, then no encryption takes place. +DNS-over-TLS and DNS-over-gRPC. If the \fItls\fP plugin is omitted, then no encryption takes place. .PP The gRPC protobuffer is defined in \fB\fCpb/dns.proto\fR. 
It defines the proto as a simple wrapper for the diff --git a/man/coredns.1 b/man/coredns.1 index 2e4824ce1c5..022c9da8be8 100644 --- a/man/coredns.1 +++ b/man/coredns.1 @@ -1,9 +1,9 @@ -.\" Generated by Mmark Markdown Processer - mmark.nl -.TH "COREDNS" 1 "April 2019" "CoreDNS" "CoreDNS" +.\" Generated by Mmark Markdown Processer - mmark.miek.nl +.TH "COREDNS" 1 "October 2019" "CoreDNS" "CoreDNS" .SH "COREDNS" .PP -\fIcoredns\fP - plugable DNS nameserver optimized for service discovery and flexibility. +\fIcoredns\fP - pluggable DNS nameserver optimized for service discovery and flexibility. .SH "SYNOPSIS" .PP @@ -32,9 +32,6 @@ Available options: specify Corefile to load, if not given CoreDNS will look for a \fB\fCCorefile\fR in the current directory. .TP -\fB-cpu\fP \fBCAP\fP -specify maximum CPU capacity in percent. -.TP \fB-dns.port\fP \fBPORT\fP override default port (53) to listen on. .TP @@ -61,5 +58,5 @@ Apache License 2.0 .SH "SEE ALSO" .PP -Corefile(5) coredns-k8s_external(7) coredns-erratic(7) coredns-nsid(7) coredns-hosts(7) coredns-dnssec(7) coredns-health(7) coredns-grpc(7) coredns-ready(7) coredns-file(7) coredns-root(7) coredns-autopath(7) coredns-auto(7) coredns-dnstap(7) coredns-pprof(7) coredns-tls(7) coredns-loadbalance(7) coredns-cache(7) coredns-whoami(7) coredns-bind(7) coredns-loop(7) coredns-import(7) coredns-chaos(7) coredns-template(7) coredns-log(7) coredns-kubernetes(7) coredns-forward(7) coredns-debug(7) coredns-secondary(7) coredns-route53(7) coredns-errors(7) coredns-metrics(7) coredns-reload(7) coredns-rewrite(7) coredns-metadata(7) coredns-federation(7) coredns-etcd(7) coredns-cancel(7) coredns-trace(7). 
+Corefile(5) coredns-k8s_external(7) coredns-erratic(7) coredns-nsid(7) coredns-any(7) coredns-hosts(7) coredns-acl(7) coredns-dnssec(7) coredns-health(7) coredns-grpc(7) coredns-sign(7) coredns-file(7) coredns-root(7) coredns-autopath(7) coredns-auto(7) coredns-clouddns(7) coredns-dnstap(7) coredns-pprof(7) coredns-tls(7) coredns-loadbalance(7) coredns-cache(7) coredns-ready(7) coredns-whoami(7) coredns-bind(7) coredns-loop(7) coredns-import(7) coredns-chaos(7) coredns-template(7) coredns-azure(7) coredns-log(7) coredns-kubernetes(7) coredns-forward(7) coredns-debug(7) coredns-secondary(7) coredns-route53(7) coredns-errors(7) coredns-metrics(7) coredns-reload(7) coredns-rewrite(7) coredns-metadata(7) coredns-etcd(7) coredns-cancel(7) coredns-trace(7). From 62f7e0784f65236d0f510e5212fba0e2615c015b Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Sun, 13 Oct 2019 17:37:24 +0100 Subject: [PATCH 239/324] Add release notes for 1.6.5 (#3373) Signed-off-by: Miek Gieben --- notes/coredns-1.6.5.md | 44 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 44 insertions(+) create mode 100644 notes/coredns-1.6.5.md diff --git a/notes/coredns-1.6.5.md b/notes/coredns-1.6.5.md new file mode 100644 index 00000000000..479befc719a --- /dev/null +++ b/notes/coredns-1.6.5.md @@ -0,0 +1,44 @@ ++++ +title = "CoreDNS-1.6.5 Release" +description = "CoreDNS-1.6.5 Release Notes." +tags = ["Release", "1.6.5", "Notes"] +release = "1.6.5" +date = 2019-10-13T10:00:00+00:00 +author = "coredns" ++++ + +The CoreDNS team has released +[CoreDNS-1.6.5](https://github.com/coredns/coredns/releases/tag/v1.6.5). + +A fairly small release that polishes various plugins and fixes a bunch of bugs. + +# Plugins + +* [*forward*](/plugins/forward) don't block on returning sockets; instead timeout and drop the + socket on the floor, this makes each go-routine guarantee to exit . 
+* [*kubernetes*](/plugins/kubernetes) adds metrics to measure kubernetes control plane latency, see + documentation for details. +* [*file*](/plugins/file) fixes a panic when comparing domains names + +## Brought to You By + +Erfan Besharat, +Hauke Löffler, +Ingo Gottwald, +janluk, +Miek Gieben, +Uladzimir Trehubenka, +Yong Tang, +yuxiaobo96. + +## Noteworthy Changes + +* core: Make request.Request smaller (https://github.com/coredns/coredns/pull/3351) +* plugin/cache: move goroutine closure to separate function to save memory (https://github.com/coredns/coredns/pull/3353) +* plugin/clouddns: remove initialization from init (https://github.com/coredns/coredns/pull/3349) +* plugin/erratic: doc and zone transfer (https://github.com/coredns/coredns/pull/3340) +* plugin/file: fix panic in miekg/dns.CompareDomainName() (https://github.com/coredns/coredns/pull/3337) +* plugin/forward: make Yield not block (https://github.com/coredns/coredns/pull/3336) +* plugin/forward: Move map to array (https://github.com/coredns/coredns/pull/3339) +* plugin/kubernetes: Measure and expose DNS programming latency from Kubernetes plugin. (https://github.com/coredns/coredns/pull/3171) +* plugin/route53: remove amazon intialization from init (https://github.com/coredns/coredns/pull/3348) From 70d3d94585d2953e2c9c642e5bbdbc7435f828ea Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 14 Oct 2019 08:47:57 -0700 Subject: [PATCH 240/324] build(deps): bump github.com/Azure/go-autorest/autorest (#3375) Bumps [github.com/Azure/go-autorest/autorest](https://github.com/Azure/go-autorest) from 0.9.1 to 0.9.2. 
- [Release notes](https://github.com/Azure/go-autorest/releases) - [Changelog](https://github.com/Azure/go-autorest/blob/master/CHANGELOG.md) - [Commits](https://github.com/Azure/go-autorest/compare/autorest/v0.9.1...autorest/v0.9.2) Signed-off-by: dependabot-preview[bot] --- go.mod | 2 +- go.sum | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index 1d1c1a0eb88..7909b1b4b18 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ go 1.12 require ( cloud.google.com/go v0.41.0 // indirect github.com/Azure/azure-sdk-for-go v31.1.0+incompatible - github.com/Azure/go-autorest/autorest v0.9.1 + github.com/Azure/go-autorest/autorest v0.9.2 github.com/Azure/go-autorest/autorest/azure/auth v0.3.0 github.com/Azure/go-autorest/autorest/to v0.2.0 // indirect github.com/DataDog/datadog-go v2.2.0+incompatible // indirect diff --git a/go.sum b/go.sum index 101f1d54125..f011097b24b 100644 --- a/go.sum +++ b/go.sum @@ -11,6 +11,8 @@ github.com/Azure/go-autorest/autorest v0.9.0 h1:MRvx8gncNaXJqOoLmhNjUAKh33JJF8Ly github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= github.com/Azure/go-autorest/autorest v0.9.1 h1:JB7Mqhna/7J8gZfVHjxDSTLSD6ciz2YgSMb/4qLXTtY= github.com/Azure/go-autorest/autorest v0.9.1/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest v0.9.2 h1:6AWuh3uWrsZJcNoCHrCF/+g4aKPCU39kaMO6/qrnK/4= +github.com/Azure/go-autorest/autorest v0.9.2/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= github.com/Azure/go-autorest/autorest/adal v0.6.0 h1:UCTq22yE3RPgbU/8u4scfnnzuCW6pwQ9n+uBtV78ouo= github.com/Azure/go-autorest/autorest/adal v0.6.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= From 
dc02c20d39541f1a7514535240717c61ac91ce98 Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 14 Oct 2019 08:51:33 -0700 Subject: [PATCH 241/324] build(deps): bump github.com/aws/aws-sdk-go from 1.25.1 to 1.25.11 (#3378) Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.25.1 to 1.25.11. - [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Changelog](https://github.com/aws/aws-sdk-go/blob/master/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.25.1...v1.25.11) Signed-off-by: dependabot-preview[bot] --- go.mod | 2 +- go.sum | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index 7909b1b4b18..e576081268f 100644 --- a/go.mod +++ b/go.mod @@ -11,7 +11,7 @@ require ( github.com/DataDog/datadog-go v2.2.0+incompatible // indirect github.com/Shopify/sarama v1.21.0 // indirect github.com/apache/thrift v0.12.0 // indirect - github.com/aws/aws-sdk-go v1.25.1 + github.com/aws/aws-sdk-go v1.25.11 github.com/caddyserver/caddy v1.0.3 github.com/coredns/federation v0.0.0-20190818181423-e032b096babe github.com/coreos/go-systemd v0.0.0-20190212144455-93d5ec2c7f76 // indirect diff --git a/go.sum b/go.sum index f011097b24b..85578c2498e 100644 --- a/go.sum +++ b/go.sum @@ -51,6 +51,8 @@ github.com/apache/thrift v0.12.0 h1:pODnxUFNcjP9UTLZGTdeh+j16A8lJbRvD3rOtrk/7bs= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/aws/aws-sdk-go v1.25.1 h1:d7zDXFT2Tgq/yw7Wku49+lKisE8Xc85erb+8PlE/Shk= github.com/aws/aws-sdk-go v1.25.1/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.25.11 h1:wUivbsVOH3LpHdC3Rl5i+FLHfg4sOmYgv4bvHe7+/Pg= +github.com/aws/aws-sdk-go v1.25.11/go.mod 
h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= From 0c03290c792d30720563bd8b9f4c6534758d48b1 Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 14 Oct 2019 08:52:39 -0700 Subject: [PATCH 242/324] build(deps): bump google.golang.org/api from 0.10.0 to 0.11.0 (#3377) Bumps [google.golang.org/api](https://github.com/google/google-api-go-client) from 0.10.0 to 0.11.0. - [Release notes](https://github.com/google/google-api-go-client/releases) - [Changelog](https://github.com/googleapis/google-api-go-client/blob/master/CHANGES.md) - [Commits](https://github.com/google/google-api-go-client/compare/v0.10.0...v0.11.0) Signed-off-by: dependabot-preview[bot] --- go.mod | 2 +- go.sum | 6 ++++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index e576081268f..c12e9454616 100644 --- a/go.mod +++ b/go.mod @@ -43,7 +43,7 @@ require ( golang.org/x/crypto v0.0.0-20191002192127-34f69633bfdc golang.org/x/net v0.0.0-20191003171128-d98b1b443823 // indirect golang.org/x/sys v0.0.0-20191003212358-c178f38b412c - google.golang.org/api v0.10.0 + google.golang.org/api v0.11.0 google.golang.org/genproto v0.0.0-20190701230453-710ae3a149df // indirect google.golang.org/grpc v1.24.0 gopkg.in/DataDog/dd-trace-go.v1 v1.18.0 diff --git a/go.sum b/go.sum index 85578c2498e..9a33d430cbd 100644 --- a/go.sum +++ b/go.sum @@ -33,6 +33,7 @@ github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1Gn github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= 
github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k= github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DataDog/datadog-go v2.2.0+incompatible h1:V5BKkxACZLjzHjSgBbr2gvLA2Ae49yhc6CSY7MLy5k4= @@ -343,6 +344,7 @@ golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTk golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422 h1:QzoH/1pFpZguR8NrRHLcO6jKqfv2zpuSqZLgdm7ZmjI= golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -427,6 +429,7 @@ golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190624190245-7f2218787638/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190907020128-2ca718005c18 
h1:xFbv3LvlvQAmbNJFCBKRv1Ccvnh9FVsW0FX2kTWWowE= golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -434,6 +437,8 @@ google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEt google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.10.0 h1:7tmAxx3oKE98VMZ+SBZzvYYWRQ9HODBxmC8mXUsraSQ= google.golang.org/api v0.10.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.11.0 h1:n/qM3q0/rV2F0pox7o0CvNhlPvZAo7pLbef122cbLJ0= +google.golang.org/api v0.11.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -480,6 +485,7 @@ gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc h1:/hemPrYIhOhy8zYrNj+069zDB68us2sMGsfkFJO0iZs= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= k8s.io/api v0.0.0-20190620084959-7cf5895f2711 h1:BblVYz/wE5WtBsD/Gvu54KyBUTJMflolzc5I2DTvh50= k8s.io/api v0.0.0-20190620084959-7cf5895f2711/go.mod h1:TBhBqb1AWbBQbW3XRusr7n7E4v2+5ZY8r8sAMnyFC5A= From 
34ffe2a0a5826b02cc56c83931c066f8d6715a18 Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 14 Oct 2019 09:13:00 -0700 Subject: [PATCH 243/324] build(deps): bump github.com/Azure/go-autorest/autorest/azure/auth (#3376) Bumps [github.com/Azure/go-autorest/autorest/azure/auth](https://github.com/Azure/go-autorest) from 0.3.0 to 0.4.0. - [Release notes](https://github.com/Azure/go-autorest/releases) - [Changelog](https://github.com/Azure/go-autorest/blob/master/CHANGELOG.md) - [Commits](https://github.com/Azure/go-autorest/compare/tracing/v0.3.0...tracing/v0.4.0) Signed-off-by: dependabot-preview[bot] --- go.mod | 2 +- go.sum | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index c12e9454616..89e22836972 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,7 @@ require ( cloud.google.com/go v0.41.0 // indirect github.com/Azure/azure-sdk-for-go v31.1.0+incompatible github.com/Azure/go-autorest/autorest v0.9.2 - github.com/Azure/go-autorest/autorest/azure/auth v0.3.0 + github.com/Azure/go-autorest/autorest/azure/auth v0.4.0 github.com/Azure/go-autorest/autorest/to v0.2.0 // indirect github.com/DataDog/datadog-go v2.2.0+incompatible // indirect github.com/Shopify/sarama v1.21.0 // indirect diff --git a/go.sum b/go.sum index 9a33d430cbd..9c75bd01364 100644 --- a/go.sum +++ b/go.sum @@ -16,8 +16,12 @@ github.com/Azure/go-autorest/autorest v0.9.2/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+B github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= github.com/Azure/go-autorest/autorest/adal v0.6.0 h1:UCTq22yE3RPgbU/8u4scfnnzuCW6pwQ9n+uBtV78ouo= github.com/Azure/go-autorest/autorest/adal v0.6.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= +github.com/Azure/go-autorest/autorest/adal 
v0.7.0 h1:PUMxSVw3tEImG0JTRqbxjXLKCSoPk7DartDELqlOuiI= +github.com/Azure/go-autorest/autorest/adal v0.7.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= github.com/Azure/go-autorest/autorest/azure/auth v0.3.0 h1:JwftqZDtWkr3qt1kcEgPd7H57uCHsXKXf66agWUQcGw= github.com/Azure/go-autorest/autorest/azure/auth v0.3.0/go.mod h1:CI4BQYBct8NS7BXNBBX+RchsFsUu5+oz+OSyR/ZIi7U= +github.com/Azure/go-autorest/autorest/azure/auth v0.4.0 h1:18ld/uw9Rr7VkNie7a7RMAcFIWrJdlUL59TWGfcu530= +github.com/Azure/go-autorest/autorest/azure/auth v0.4.0/go.mod h1:Oo5cRhLvZteXzI2itUm5ziqsoIxRkzrt3t61FeZaS18= github.com/Azure/go-autorest/autorest/azure/cli v0.3.0 h1:5PAqnv+CSTwW9mlZWZAizmzrazFWEgZykEZXpr2hDtY= github.com/Azure/go-autorest/autorest/azure/cli v0.3.0/go.mod h1:rNYMNAefZMRowqCV0cVhr/YDW5dD7afFq9nXAXL4ykE= github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= From 9ccc736f6868fb63fa401e0c72343f91d63a3d07 Mon Sep 17 00:00:00 2001 From: Sean Liao Date: Wed, 16 Oct 2019 08:32:11 +0200 Subject: [PATCH 244/324] plugin/dnssec, plugin/sign: ed25519 support (#3380) * add ed25519 dnskey support Signed-off-by: Sean Liao * fix ed25519 type assertion Signed-off-by: Sean Liao * clean up whitespace Signed-off-by: Sean Liao --- plugin/dnssec/dnskey.go | 6 +++++- plugin/sign/keys.go | 2 +- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/plugin/dnssec/dnskey.go b/plugin/dnssec/dnskey.go index 1a2cf93438b..5c67fca0802 100644 --- a/plugin/dnssec/dnskey.go +++ b/plugin/dnssec/dnskey.go @@ -9,8 +9,9 @@ import ( "time" "github.com/coredns/coredns/request" - "github.com/miekg/dns" + + "golang.org/x/crypto/ed25519" ) // DNSKEY holds a DNSSEC public and private key used for on-the-fly signing. 
@@ -55,6 +56,9 @@ func ParseKeyFile(pubFile, privFile string) (*DNSKEY, error) { if s, ok := p.(*ecdsa.PrivateKey); ok { return &DNSKEY{K: dk, D: dk.ToDS(dns.SHA256), s: s, tag: dk.KeyTag()}, nil } + if s, ok := p.(ed25519.PrivateKey); ok { + return &DNSKEY{K: dk, D: dk.ToDS(dns.SHA256), s: s, tag: dk.KeyTag()}, nil + } return &DNSKEY{K: dk, D: dk.ToDS(dns.SHA256), s: nil, tag: 0}, errors.New("no private key found") } diff --git a/plugin/sign/keys.go b/plugin/sign/keys.go index 346175be04f..03065e8b434 100644 --- a/plugin/sign/keys.go +++ b/plugin/sign/keys.go @@ -97,7 +97,7 @@ func readKeyPair(public, private string) (Pair, error) { switch signer := privkey.(type) { case *ecdsa.PrivateKey: return Pair{Public: dnskey.(*dns.DNSKEY), KeyTag: dnskey.(*dns.DNSKEY).KeyTag(), Private: signer}, nil - case *ed25519.PrivateKey: + case ed25519.PrivateKey: return Pair{Public: dnskey.(*dns.DNSKEY), KeyTag: dnskey.(*dns.DNSKEY).KeyTag(), Private: signer}, nil case *rsa.PrivateKey: return Pair{Public: dnskey.(*dns.DNSKEY), KeyTag: dnskey.(*dns.DNSKEY).KeyTag(), Private: signer}, nil From c187d8fa01f94c7b5c94dff0386bada2fe6a92aa Mon Sep 17 00:00:00 2001 From: Anshul Sharma Date: Thu, 17 Oct 2019 17:36:35 +0300 Subject: [PATCH 245/324] Issue-3374 (#3383) - Remove logo Signed-off-by: Anshul Sharma --- coremain/logo.go | 14 -------------- coremain/run.go | 9 +++------ 2 files changed, 3 insertions(+), 20 deletions(-) delete mode 100644 coremain/logo.go diff --git a/coremain/logo.go b/coremain/logo.go deleted file mode 100644 index 09db8de4287..00000000000 --- a/coremain/logo.go +++ /dev/null @@ -1,14 +0,0 @@ -package coremain - -// This is CoreDNS' ascii LOGO, nothing fancy. It's generated with: -// figlet -f slant CoreDNS -// We're printing the logo line by line, hence splitting it up. 
-var logo = []string{ - ` ______ ____ _ _______`, - ` / ____/___ ________ / __ \/ | / / ___/`, - ` / / / __ \/ ___/ _ \/ / / / |/ /\__ \ `, - `/ /___/ /_/ / / / __/ /_/ / /| /___/ / `, - `\____/\____/_/ \___/_____/_/ |_//____/ `, -} - -const marker = "~ " diff --git a/coremain/run.go b/coremain/run.go index 5dcb3c013bd..98ba2fd6c57 100644 --- a/coremain/run.go +++ b/coremain/run.go @@ -139,21 +139,18 @@ func defaultLoader(serverType string) (caddy.Input, error) { }, nil } -// showVersion prints the version that is starting. We print our logo on the left. +// showVersion prints the version that is starting. func showVersion() { - fmt.Println(logo[0]) fmt.Print(versionString()) fmt.Print(releaseString()) if devBuild && gitShortStat != "" { fmt.Printf("%s\n%s\n", gitShortStat, gitFilesModified) } - fmt.Println(logo[3]) - fmt.Println(logo[4]) } // versionString returns the CoreDNS version as a string. func versionString() string { - return fmt.Sprintf("%s\t%s%s-%s\n", logo[1], marker, caddy.AppName, caddy.AppVersion) + return fmt.Sprintf("%s-%s\n", caddy.AppName, caddy.AppVersion) } // releaseString returns the release information related to CoreDNS version: @@ -161,7 +158,7 @@ func versionString() string { // e.g., // linux/amd64, go1.8.3, a6d2d7b5 func releaseString() string { - return fmt.Sprintf("%s\t%s%s/%s, %s, %s\n", logo[2], marker, runtime.GOOS, runtime.GOARCH, runtime.Version(), GitCommit) + return fmt.Sprintf("%s/%s, %s, %s\n", runtime.GOOS, runtime.GOARCH, runtime.Version(), GitCommit) } // setVersion figures out the version information From 5f114d38cab74f0c5c99b03ffe99f5607e393a92 Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Thu, 17 Oct 2019 15:53:11 +0100 Subject: [PATCH 246/324] pkg/log: add Clear to stop debug logging (#3372) When reloading we need to disable debug output when the debug plugin is removed from the config file. Add a `Clear` function to pkg/log and use it in the server server. 
Add test case in pkg/log, for actuall check I manually checked the output by sprinkling some debug statements in the startup and checking with sending SIGUSR1. Also clear up the comments in pkg/log to remove the text about time stamping. Fixes: #3035 Signed-off-by: Miek Gieben --- core/dnsserver/server.go | 4 ++++ plugin/pkg/log/log.go | 19 +++++++++++++------ plugin/pkg/log/log_test.go | 7 +++++++ 3 files changed, 24 insertions(+), 6 deletions(-) diff --git a/core/dnsserver/server.go b/core/dnsserver/server.go index 587f219ddc9..9a6fea1ced4 100644 --- a/core/dnsserver/server.go +++ b/core/dnsserver/server.go @@ -64,6 +64,10 @@ func NewServer(addr string, group []*Config) (*Server, error) { if site.Debug { s.debug = true log.D.Set() + } else { + // When reloading we need to explicitly disable debug logging if it is now disabled. + s.debug = false + log.D.Clear() } // set the config per zone s.zones[site.Zone] = site diff --git a/plugin/pkg/log/log.go b/plugin/pkg/log/log.go index 2d8ba78d851..6f9dd07c68d 100644 --- a/plugin/pkg/log/log.go +++ b/plugin/pkg/log/log.go @@ -1,7 +1,7 @@ -// Package log implements a small wrapper around the std lib log package. -// It implements log levels by prefixing the logs with the current time -// with in RFC3339Milli and [INFO], [DEBUG], [WARNING] or [ERROR]. -// Debug logging is available and enabled if the *debug* plugin is used. +// Package log implements a small wrapper around the std lib log package. It +// implements log levels by prefixing the logs with [INFO], [DEBUG], [WARNING] +// or [ERROR]. Debug logging is available and enabled if the *debug* plugin is +// used. // // log.Info("this is some logging"), will log on the Info level. // @@ -25,14 +25,21 @@ type d struct { sync.RWMutex } -// Set sets d to true. +// Set enables debug logging. func (d *d) Set() { d.Lock() d.on = true d.Unlock() } -// Value return the boolean value of d. +// Clear disables debug logging. 
+func (d *d) Clear() { + d.Lock() + d.on = false + d.Unlock() +} + +// Value returns if debug logging is enabled. func (d *d) Value() bool { d.RLock() b := d.on diff --git a/plugin/pkg/log/log_test.go b/plugin/pkg/log/log_test.go index 4f0dc4f4318..32c1d39ad15 100644 --- a/plugin/pkg/log/log_test.go +++ b/plugin/pkg/log/log_test.go @@ -23,6 +23,13 @@ func TestDebug(t *testing.T) { if x := f.String(); !strings.Contains(x, debug+"debug") { t.Errorf("Expected debug log to be %s, got %s", debug+"debug", x) } + f.Reset() + + D.Clear() + Debug("debug") + if x := f.String(); x != "" { + t.Errorf("Expected no debug logs, got %s", x) + } } func TestDebugx(t *testing.T) { From 6f375cbbda9cd8572d8f7b271138045282ede7b5 Mon Sep 17 00:00:00 2001 From: Chris O'Haver Date: Sat, 19 Oct 2019 03:08:14 -0400 Subject: [PATCH 247/324] add MustNormalize (#3385) Signed-off-by: Chris O'Haver --- plugin/normalize.go | 17 +++++++++++++++-- plugin/normalize_test.go | 11 +++++++++++ 2 files changed, 26 insertions(+), 2 deletions(-) diff --git a/plugin/normalize.go b/plugin/normalize.go index 1289207fde8..dea7d6723a5 100644 --- a/plugin/normalize.go +++ b/plugin/normalize.go @@ -61,13 +61,26 @@ type ( // Normalize will return the host portion of host, stripping // of any port or transport. The host will also be fully qualified and lowercased. +// An empty string is returned on failure func (h Host) Normalize() string { + // The error can be ignored here, because this function should only be called after the corefile has already been vetted. + host, _ := h.MustNormalize() + return host +} + +// MustNormalize will return the host portion of host, stripping +// of any port or transport. The host will also be fully qualified and lowercased. +// An error is returned on error +func (h Host) MustNormalize() (string, error) { s := string(h) _, s = parse.Transport(s) // The error can be ignored here, because this function is called after the corefile has already been vetted. 
- host, _, _, _ := SplitHostPort(s) - return Name(host).Normalize() + host, _, _, err := SplitHostPort(s) + if err != nil { + return "", err + } + return Name(host).Normalize(), nil } // SplitHostPort splits s up in a host and port portion, taking reverse address notation into account. diff --git a/plugin/normalize_test.go b/plugin/normalize_test.go index 315aaf5d9fe..2a82271bae6 100644 --- a/plugin/normalize_test.go +++ b/plugin/normalize_test.go @@ -83,6 +83,17 @@ func TestHostNormalize(t *testing.T) { } } +func TestHostMustNormalizeFail(t *testing.T) { + hosts := []string{"..:53", "::", ""} + for i := 0; i < len(hosts); i++ { + ts := hosts[i] + h, err := Host(ts).MustNormalize() + if err == nil { + t.Errorf("Expected error, got %v", h) + } + } +} + func TestSplitHostPortReverse(t *testing.T) { tests := map[string]int{ "example.org.": 0, From 9c59b0e02af6c37dc7003d81871610f531a31b5a Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 21 Oct 2019 16:46:28 -0700 Subject: [PATCH 248/324] build(deps): bump github.com/aws/aws-sdk-go from 1.25.11 to 1.25.16 (#3389) Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.25.11 to 1.25.16. 
- [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Changelog](https://github.com/aws/aws-sdk-go/blob/master/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.25.11...v1.25.16) Signed-off-by: dependabot-preview[bot] --- go.mod | 2 +- go.sum | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index 89e22836972..70e8e3fb879 100644 --- a/go.mod +++ b/go.mod @@ -11,7 +11,7 @@ require ( github.com/DataDog/datadog-go v2.2.0+incompatible // indirect github.com/Shopify/sarama v1.21.0 // indirect github.com/apache/thrift v0.12.0 // indirect - github.com/aws/aws-sdk-go v1.25.11 + github.com/aws/aws-sdk-go v1.25.16 github.com/caddyserver/caddy v1.0.3 github.com/coredns/federation v0.0.0-20190818181423-e032b096babe github.com/coreos/go-systemd v0.0.0-20190212144455-93d5ec2c7f76 // indirect diff --git a/go.sum b/go.sum index 9c75bd01364..2abb8908866 100644 --- a/go.sum +++ b/go.sum @@ -58,6 +58,8 @@ github.com/aws/aws-sdk-go v1.25.1 h1:d7zDXFT2Tgq/yw7Wku49+lKisE8Xc85erb+8PlE/Shk github.com/aws/aws-sdk-go v1.25.1/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.25.11 h1:wUivbsVOH3LpHdC3Rl5i+FLHfg4sOmYgv4bvHe7+/Pg= github.com/aws/aws-sdk-go v1.25.11/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.25.16 h1:k7Fy6T/uNuLX6zuayU/TJoP7yMgGcJSkZpF7QVjwYpA= +github.com/aws/aws-sdk-go v1.25.16/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= From dfcb4edbe9f8adc774703b40e9134452d9918bde Mon Sep 17 00:00:00 2001 From: 
"dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 21 Oct 2019 16:46:41 -0700 Subject: [PATCH 249/324] build(deps): bump github.com/prometheus/client_golang (#3390) Bumps [github.com/prometheus/client_golang](https://github.com/prometheus/client_golang) from 1.1.0 to 1.2.1. - [Release notes](https://github.com/prometheus/client_golang/releases) - [Changelog](https://github.com/prometheus/client_golang/blob/master/CHANGELOG.md) - [Commits](https://github.com/prometheus/client_golang/compare/v1.1.0...v1.2.1) Signed-off-by: dependabot-preview[bot] --- go.mod | 2 +- go.sum | 6 ++++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index 70e8e3fb879..79c7d2829ec 100644 --- a/go.mod +++ b/go.mod @@ -33,7 +33,7 @@ require ( github.com/opentracing/opentracing-go v1.1.0 github.com/openzipkin-contrib/zipkin-go-opentracing v0.3.5 github.com/philhofer/fwd v1.0.0 // indirect - github.com/prometheus/client_golang v1.1.0 + github.com/prometheus/client_golang v1.2.1 github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 github.com/prometheus/common v0.7.0 github.com/spf13/pflag v1.0.3 // indirect diff --git a/go.sum b/go.sum index 2abb8908866..2a6f3738ddb 100644 --- a/go.sum +++ b/go.sum @@ -70,6 +70,7 @@ github.com/caddyserver/caddy v1.0.3 h1:i9gRhBgvc5ifchwWtSe7pDpsdS9+Q0Rw9oYQmYUTw github.com/caddyserver/caddy v1.0.3/go.mod h1:G+ouvOY32gENkJC+jhgl62TyhvqEsFaDiZ4uw0RzP1E= github.com/cenkalti/backoff v2.1.1+incompatible h1:tKJnvO2kl0zmb/jA5UKAt4VoEVw1qxKWjE/Bpp46npY= github.com/cenkalti/backoff v2.1.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/cespare/xxhash/v2 v2.1.0/go.mod h1:dgIUBU3pDso/gPgZ1osOZ0iQf77oPR28Tjxl5dIMyVM= github.com/cheekybits/genny v0.0.0-20170328200008-9127e812e1e9/go.mod 
h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa h1:OaNxuTZr7kxeODyLWsRMC+OD03aFUH+mW6r2d+MWa5Y= @@ -276,9 +277,12 @@ github.com/prometheus/client_golang v1.0.0 h1:vrDKnkGzuGvhNAL56c7DBz29ZL+KxnoR0x github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.1.0 h1:BQ53HtBmfOitExawJ6LokA4x8ov/z0SYYb0+HxJfRI8= github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= +github.com/prometheus/client_golang v1.2.1 h1:JnMpQc6ppsNgw9QPAGF6Dod479itz7lvlsMzzNayLOI= +github.com/prometheus/client_golang v1.2.1/go.mod h1:XMU6Z2MjaRKVu/dC1qupJI9SiNkDYzz3xecMgSW/F+U= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/common v0.4.1 h1:K0MGApIoQvMw27RTdJkPbr3JZ7DNbtxQNyi5STVM6Kw= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.6.0 h1:kRhiuYSXR3+uv2IbVbZhUxK5zVD/2pp3Gd2PpvPkpEo= @@ -290,6 +294,7 @@ github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNG github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.3 
h1:CTwfnzjQ+8dS6MhHHu4YswVAD99sL2wjPqP+VkURmKE= github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= +github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhDYGoxY8uLVpewe1GDZ2vu2Tr/vTdVAkFQ= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= @@ -413,6 +418,7 @@ golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe h1:6fAMxZRR6sl1Uq8U61gxU+kPT golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191003212358-c178f38b412c h1:6Zx7DRlKXf79yfxuQ/7GqV3w2y7aDsk6bGg0MzF5RVU= golang.org/x/sys v0.0.0-20191003212358-c178f38b412c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= From eca2385bdd82e9d49c7b9498f22abc678235a954 Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 28 Oct 2019 07:16:50 -0700 Subject: [PATCH 250/324] build(deps): bump github.com/aws/aws-sdk-go from 1.25.16 to 1.25.19 (#3407) Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.25.16 to 1.25.19. 
- [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Changelog](https://github.com/aws/aws-sdk-go/blob/master/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.25.16...v1.25.19) Signed-off-by: dependabot-preview[bot] --- go.mod | 2 +- go.sum | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index 79c7d2829ec..33af74e4395 100644 --- a/go.mod +++ b/go.mod @@ -11,7 +11,7 @@ require ( github.com/DataDog/datadog-go v2.2.0+incompatible // indirect github.com/Shopify/sarama v1.21.0 // indirect github.com/apache/thrift v0.12.0 // indirect - github.com/aws/aws-sdk-go v1.25.16 + github.com/aws/aws-sdk-go v1.25.19 github.com/caddyserver/caddy v1.0.3 github.com/coredns/federation v0.0.0-20190818181423-e032b096babe github.com/coreos/go-systemd v0.0.0-20190212144455-93d5ec2c7f76 // indirect diff --git a/go.sum b/go.sum index 2a6f3738ddb..3af290daac3 100644 --- a/go.sum +++ b/go.sum @@ -60,6 +60,8 @@ github.com/aws/aws-sdk-go v1.25.11 h1:wUivbsVOH3LpHdC3Rl5i+FLHfg4sOmYgv4bvHe7+/P github.com/aws/aws-sdk-go v1.25.11/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.25.16 h1:k7Fy6T/uNuLX6zuayU/TJoP7yMgGcJSkZpF7QVjwYpA= github.com/aws/aws-sdk-go v1.25.16/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.25.19 h1:sp3xP91qIAVhWufyn9qM6Zhhn6kX06WJQcmhRj7QTXc= +github.com/aws/aws-sdk-go v1.25.19/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= From 1821d9cb7e77fb9cdc8b90f46171d7c16b14e37c Mon Sep 17 00:00:00 2001 
From: Dan Panzarella Date: Mon, 28 Oct 2019 15:17:54 -0400 Subject: [PATCH 251/324] small typo in autopath README so->to (#3408) Automatically submitted. --- plugin/autopath/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugin/autopath/README.md b/plugin/autopath/README.md index 426151ce7b6..86266c5fa44 100644 --- a/plugin/autopath/README.md +++ b/plugin/autopath/README.md @@ -39,7 +39,7 @@ The `server` label is explained in the *metrics* plugin documentation. autopath my-resolv.conf ~~~ -Use `my-resolv.conf` as the file to get the search path from. This file only needs so have one line: +Use `my-resolv.conf` as the file to get the search path from. This file only needs to have one line: `search domain1 domain2 ...` ~~~ From 431c70e8b965545f16fa7afdc025787aec014241 Mon Sep 17 00:00:00 2001 From: Euan Kemp Date: Tue, 29 Oct 2019 06:38:56 -0700 Subject: [PATCH 252/324] Correct some if/iff mixups (#3406) This is a fixup for https://github.com/coredns/coredns/pull/3310 which replaced some 'iff's with 'if' under the assumption they were typos. I'm fairly confident they were "If and only if" (https://en.wikipedia.org/wiki/If_and_only_if), which is commonly shortened as "iff". I've updated them to the full length 'if, and only if' for the sake of readability. Signed-off-by: Euan Kemp --- plugin/autopath/autopath.go | 2 +- plugin/dnssec/dnskey.go | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/plugin/autopath/autopath.go b/plugin/autopath/autopath.go index 8583988d9ba..4db9366d2f9 100644 --- a/plugin/autopath/autopath.go +++ b/plugin/autopath/autopath.go @@ -5,7 +5,7 @@ client's search path resolution by performing these lookups on the server... The server has a copy (via AutoPathFunc) of the client's search path and on receiving a query it first establishes if the suffix matches the FIRST configured element. 
If no match can be found the query will be forwarded up the plugin -chain without interference (if 'fallthrough' has been set). +chain without interference (if, and only if, 'fallthrough' has been set). If the query is deemed to fall in the search path the server will perform the queries with each element of the search path appended in sequence until a diff --git a/plugin/dnssec/dnskey.go b/plugin/dnssec/dnskey.go index 5c67fca0802..6ca89802ea5 100644 --- a/plugin/dnssec/dnskey.go +++ b/plugin/dnssec/dnskey.go @@ -83,12 +83,12 @@ func (d Dnssec) getDNSKEY(state request.Request, zone string, do bool, server st return m } -// Return true if this is a zone key with the SEP bit unset. This implies a ZSK (rfc4034 2.1.1). +// Return true if, and only if, this is a zone key with the SEP bit unset. This implies a ZSK (rfc4034 2.1.1). func (k DNSKEY) isZSK() bool { return k.K.Flags&(1<<8) == (1<<8) && k.K.Flags&1 == 0 } -// Return true if this is a zone key with the SEP bit set. This implies a KSK (rfc4034 2.1.1). +// Return true if, and only if, this is a zone key with the SEP bit set. This implies a KSK (rfc4034 2.1.1). func (k DNSKEY) isKSK() bool { return k.K.Flags&(1<<8) == (1<<8) && k.K.Flags&1 == 1 } From 5d8bda58a9cab89bd21860e7429b93cf65a728b1 Mon Sep 17 00:00:00 2001 From: Yong Tang Date: Wed, 30 Oct 2019 21:04:07 -0700 Subject: [PATCH 253/324] Update Project Lead's term (#3338) Base on the [GOVERNANCE.md] of CoreDNS, we will need to update project lead's term: 1) Any PR should only be opened no earlier than 6 weeks before the end of current lead's term 2) PR can only be merged after it has been opened for 4 weeks. 3) See [GOVERNANCE.md] for more details on how votes are counted. Since it is less than 6 weeks before 11/11/2019, this PR: 1) propose to extend project lead's term from 11/11/2019 to 11/11/2020. 2) will keep open until at least to 10/29/2019 (4 weeks), so that community has a chance to voice opinions Please specify +1/-1 for agree/disagree. 
Note: Alternative PRs could be opened concurrently, as long as it following the rules specified in [GOVERNANCE.md]. Signed-off-by: Yong Tang --- OWNERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/OWNERS b/OWNERS index f0dd9c8506b..7807a965a9c 100644 --- a/OWNERS +++ b/OWNERS @@ -7,7 +7,7 @@ approvers: - grobie - isolus - johnbelamaric - - miekg # miek@miek.nl | project lead: 11/11/2019 + - miekg # miek@miek.nl | project lead: 11/11/2020 - pmoroney - rajansandeep - stp-ip From a7ab592e7895ee8369885f0d41251b9adc4f8cbf Mon Sep 17 00:00:00 2001 From: Chris O'Haver Date: Fri, 1 Nov 2019 12:02:43 -0400 Subject: [PATCH 254/324] plugin/transfer: Zone transfer plugin (#3223) * transfer plugin Signed-off-by: Chris O'Haver --- core/dnsserver/zdirectives.go | 1 + core/plugin/zplugin.go | 1 + plugin.cfg | 1 + plugin/transfer/OWNERS | 6 + plugin/transfer/README.md | 32 ++++ plugin/transfer/setup.go | 102 +++++++++++ plugin/transfer/setup_test.go | 85 +++++++++ plugin/transfer/transfer.go | 181 +++++++++++++++++++ plugin/transfer/transfer_test.go | 291 +++++++++++++++++++++++++++++++ 9 files changed, 700 insertions(+) create mode 100644 plugin/transfer/OWNERS create mode 100644 plugin/transfer/README.md create mode 100644 plugin/transfer/setup.go create mode 100644 plugin/transfer/setup_test.go create mode 100644 plugin/transfer/transfer.go create mode 100644 plugin/transfer/transfer_test.go diff --git a/core/dnsserver/zdirectives.go b/core/dnsserver/zdirectives.go index d1310a7f1b6..8e2153aadc8 100644 --- a/core/dnsserver/zdirectives.go +++ b/core/dnsserver/zdirectives.go @@ -35,6 +35,7 @@ var Directives = []string{ "dnssec", "autopath", "template", + "transfer", "hosts", "route53", "azure", diff --git a/core/plugin/zplugin.go b/core/plugin/zplugin.go index 3a31f412749..79892235f8e 100644 --- a/core/plugin/zplugin.go +++ b/core/plugin/zplugin.go @@ -45,6 +45,7 @@ import ( _ "github.com/coredns/coredns/plugin/template" _ 
"github.com/coredns/coredns/plugin/tls" _ "github.com/coredns/coredns/plugin/trace" + _ "github.com/coredns/coredns/plugin/transfer" _ "github.com/coredns/coredns/plugin/whoami" _ "github.com/coredns/federation" ) diff --git a/plugin.cfg b/plugin.cfg index 66552cd802d..47b9ecb517a 100644 --- a/plugin.cfg +++ b/plugin.cfg @@ -44,6 +44,7 @@ rewrite:rewrite dnssec:dnssec autopath:autopath template:template +transfer:transfer hosts:hosts route53:route53 azure:azure diff --git a/plugin/transfer/OWNERS b/plugin/transfer/OWNERS new file mode 100644 index 00000000000..3a4ef23a1f6 --- /dev/null +++ b/plugin/transfer/OWNERS @@ -0,0 +1,6 @@ +reviewers: + - miekg + - chrisohaver +approvers: + - miekg + - chrisohaver diff --git a/plugin/transfer/README.md b/plugin/transfer/README.md new file mode 100644 index 00000000000..df3ab3eb42a --- /dev/null +++ b/plugin/transfer/README.md @@ -0,0 +1,32 @@ +# transfer + +## Name + +*transfer* - answer zone transfers requests for compatible authoritative +plugins. + +## Description + +This plugin answers zone transfers for authoritative plugins that implement +`transfer.Transferer`. + +Transfer answers AXFR requests and IXFR requests with AXFR fallback if the +zone has changed. + +Notifies are not currently supported. + +## Syntax + +~~~ +transfer [ZONE...] { + to HOST... +} +~~~ + +* **ZONES** The zones *transfer* will answer zone requests for. If left blank, + the zones are inherited from the enclosing server block. To answer zone + transfers for a given zone, there must be another plugin in the same server + block that serves the same zone, and implements `transfer.Transferer`. + +* `to ` **HOST...** The hosts *transfer* will transfer to. Use `*` to permit + transfers to all hosts. 
diff --git a/plugin/transfer/setup.go b/plugin/transfer/setup.go new file mode 100644 index 00000000000..e83fd6d0b0d --- /dev/null +++ b/plugin/transfer/setup.go @@ -0,0 +1,102 @@ +package transfer + +import ( + "github.com/coredns/coredns/core/dnsserver" + "github.com/coredns/coredns/plugin" + parsepkg "github.com/coredns/coredns/plugin/pkg/parse" + "github.com/coredns/coredns/plugin/pkg/transport" + + "github.com/caddyserver/caddy" +) + +func init() { + caddy.RegisterPlugin("transfer", caddy.Plugin{ + ServerType: "dns", + Action: setup, + }) +} + +func setup(c *caddy.Controller) error { + t, err := parse(c) + + if err != nil { + return plugin.Error("transfer", err) + } + + dnsserver.GetConfig(c).AddPlugin(func(next plugin.Handler) plugin.Handler { + t.Next = next + return t + }) + + c.OnStartup(func() error { + // find all plugins that implement Transferer and add them to Transferers + plugins := dnsserver.GetConfig(c).Handlers() + for _, pl := range plugins { + tr, ok := pl.(Transferer) + if !ok { + continue + } + t.Transferers = append(t.Transferers, tr) + } + return nil + }) + + return nil +} + +func parse(c *caddy.Controller) (*Transfer, error) { + + t := &Transfer{} + for c.Next() { + x := &xfr{} + zones := c.RemainingArgs() + + if len(zones) != 0 { + x.Zones = zones + for i := 0; i < len(x.Zones); i++ { + nzone, err := plugin.Host(x.Zones[i]).MustNormalize() + if err != nil { + return nil, err + } + x.Zones[i] = nzone + } + } else { + x.Zones = make([]string, len(c.ServerBlockKeys)) + for i := 0; i < len(c.ServerBlockKeys); i++ { + nzone, err := plugin.Host(c.ServerBlockKeys[i]).MustNormalize() + if err != nil { + return nil, err + } + x.Zones[i] = nzone + } + } + + for c.NextBlock() { + switch c.Val() { + case "to": + args := c.RemainingArgs() + if len(args) == 0 { + return nil, c.ArgErr() + } + for _, host := range args { + if host == "*" { + x.to = append(x.to, host) + continue + } + normalized, err := 
parsepkg.HostPort(host, transport.Port) + if err != nil { + return nil, err + } + x.to = append(x.to, normalized) + } + default: + return nil, plugin.Error("transfer", c.Errf("unknown property '%s'", c.Val())) + } + } + if len(x.to) == 0 { + return nil, plugin.Error("transfer", c.Errf("'to' is required", c.Val())) + } + t.xfrs = append(t.xfrs, x) + } + return t, nil +} diff --git a/plugin/transfer/setup_test.go b/plugin/transfer/setup_test.go new file mode 100644 index 00000000000..421910d46bc --- /dev/null +++ b/plugin/transfer/setup_test.go @@ -0,0 +1,85 @@ +package transfer + +import ( + "testing" + + "github.com/caddyserver/caddy" +) + +func TestParse(t *testing.T) { + tests := []struct { + input string + shouldErr bool + exp *Transfer + }{ + {`transfer example.net example.org { + to 1.2.3.4 5.6.7.8:1053 [1::2]:34 + } + transfer example.com example.edu { + to * 1.2.3.4 + }`, + false, + &Transfer{ + xfrs: []*xfr{{ + Zones: []string{"example.net.", "example.org."}, + to: []string{"1.2.3.4:53", "5.6.7.8:1053", "[1::2]:34"}, + }, { + Zones: []string{"example.com.", "example.edu."}, + to: []string{"*", "1.2.3.4:53"}, + }}, + }, + }, + // errors + {`transfer example.net example.org { + }`, + true, + nil, + }, + {`transfer example.net example.org { + invalid option + }`, + true, + nil, + }, + } + for i, tc := range tests { + c := caddy.NewTestController("dns", tc.input) + transfer, err := parse(c) + + if err == nil && tc.shouldErr { + t.Fatalf("Test %d expected errors, but got no error", i) + } + if err != nil && !tc.shouldErr { + t.Fatalf("Test %d expected no errors, but got '%v'", i, err) + } + if tc.shouldErr { + continue + } + + if len(tc.exp.xfrs) != len(transfer.xfrs) { + t.Fatalf("Test %d expected %d xfrs, got %d", i, len(tc.exp.xfrs), len(transfer.xfrs)) + } + for j, x := range transfer.xfrs { + // Check Zones + if len(tc.exp.xfrs[j].Zones) != len(x.Zones) { + t.Fatalf("Test %d expected %d zones, got %d", i, len(tc.exp.xfrs[i].Zones), len(x.Zones)) + } 
+ for k, zone := range x.Zones { + if tc.exp.xfrs[j].Zones[k] != zone { + t.Errorf("Test %d expected zone %v, got %v", i, tc.exp.xfrs[j].Zones[k], zone) + + } + } + // Check to + if len(tc.exp.xfrs[j].to) != len(x.to) { + t.Fatalf("Test %d expected %d 'to' values, got %d", i, len(tc.exp.xfrs[i].to), len(x.to)) + } + for k, to := range x.to { + if tc.exp.xfrs[j].to[k] != to { + t.Errorf("Test %d expected %v in 'to', got %v", i, tc.exp.xfrs[j].to[k], to) + + } + } + } + } +} diff --git a/plugin/transfer/transfer.go b/plugin/transfer/transfer.go new file mode 100644 index 00000000000..9f86915485b --- /dev/null +++ b/plugin/transfer/transfer.go @@ -0,0 +1,181 @@ +package transfer + +import ( + "context" + "errors" + "fmt" + "net" + "sync" + + "github.com/coredns/coredns/plugin" + clog "github.com/coredns/coredns/plugin/pkg/log" + "github.com/coredns/coredns/request" + + "github.com/miekg/dns" +) + +var log = clog.NewWithPlugin("transfer") + +// Transfer is a plugin that handles zone transfers. +type Transfer struct { + Transferers []Transferer // the list of plugins that implement Transferer + xfrs []*xfr + Next plugin.Handler // the next plugin in the chain +} + +type xfr struct { + Zones []string + to []string +} + +// Transferer may be implemented by plugins to enable zone transfers +type Transferer interface { + // Transfer returns a channel to which it writes responses to the transfer request. + // If the plugin is not authoritative for the zone, it should immediately return the + // Transfer.ErrNotAuthoritative error. + // + // If serial is 0, handle as an AXFR request. Transfer should send all records + // in the zone to the channel. The SOA should be written to the channel first, followed + // by all other records, including all NS + glue records. + // + // If serial is not 0, handle as an IXFR request. If the serial is equal to or greater (newer) than + // the current serial for the zone, send a single SOA record to the channel. 
+ // If the serial is less (older) than the current serial for the zone, perform an AXFR fallback + // by proceeding as if an AXFR was requested (as above). + Transfer(zone string, serial uint32) (<-chan []dns.RR, error) +} + +var ( + // ErrNotAuthoritative is returned by Transfer() when the plugin is not authoritative for the zone + ErrNotAuthoritative = errors.New("not authoritative for zone") +) + +// ServeDNS implements the plugin.Handler interface. +func (t Transfer) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (int, error) { + state := request.Request{W: w, Req: r} + if state.QType() != dns.TypeAXFR && state.QType() != dns.TypeIXFR { + return plugin.NextOrFailure(t.Name(), t.Next, ctx, w, r) + } + + // Find the first transfer instance for which the queried zone is a subdomain. + var x *xfr + for _, xfr := range t.xfrs { + zone := plugin.Zones(xfr.Zones).Matches(state.Name()) + if zone == "" { + continue + } + x = xfr + } + if x == nil { + // Requested zone did not match any transfer instance zones. + // Pass request down chain in case later plugins are capable of handling transfer requests themselves. 
+ return plugin.NextOrFailure(t.Name(), t.Next, ctx, w, r) + } + + if !x.allowed(state) { + return dns.RcodeRefused, nil + } + + // Get serial from request if this is an IXFR + var serial uint32 + if state.QType() == dns.TypeIXFR { + soa, ok := r.Ns[0].(*dns.SOA) + if !ok { + return dns.RcodeServerFailure, nil + } + serial = soa.Serial + } + + // Get a receiving channel from the first Transferer plugin that returns one + var fromPlugin <-chan []dns.RR + for _, p := range t.Transferers { + var err error + fromPlugin, err = p.Transfer(state.QName(), serial) + if err == ErrNotAuthoritative { + // plugin was not authoritative for the zone, try next plugin + continue + } + if err != nil { + return dns.RcodeServerFailure, err + } + break + } + + if fromPlugin == nil { + return plugin.NextOrFailure(t.Name(), t.Next, ctx, w, r) + } + + // Send response to client + ch := make(chan *dns.Envelope) + tr := new(dns.Transfer) + wg := new(sync.WaitGroup) + go func() { + wg.Add(1) + tr.Out(w, r, ch) + wg.Done() + }() + + var soa *dns.SOA + rrs := []dns.RR{} + l := 0 + +receive: + for records := range fromPlugin { + for _, record := range records { + if soa == nil { + if soa = record.(*dns.SOA); soa == nil { + break receive + } + serial = soa.Serial + } + rrs = append(rrs, record) + if len(rrs) > 500 { + ch <- &dns.Envelope{RR: rrs} + l += len(rrs) + rrs = []dns.RR{} + } + } + } + + if len(rrs) > 0 { + ch <- &dns.Envelope{RR: rrs} + l += len(rrs) + rrs = []dns.RR{} + } + + if soa != nil { + ch <- &dns.Envelope{RR: []dns.RR{soa}} // closing SOA. + l++ + } + + close(ch) // Even though we close the channel here, we still have + wg.Wait() // to wait before we can return and close the connection. 
+ + if soa == nil { + return dns.RcodeServerFailure, fmt.Errorf("first record in zone %s is not SOA", state.QName()) + } + + log.Infof("Outgoing transfer of %d records of zone %s to %s with %d SOA serial", l, state.QName(), state.IP(), serial) + return dns.RcodeSuccess, nil +} + +func (x xfr) allowed(state request.Request) bool { + for _, h := range x.to { + if h == "*" { + return true + } + to, _, err := net.SplitHostPort(h) + if err != nil { + return false + } + // If remote IP matches we accept. + remote := state.IP() + if to == remote { + return true + } + } + return false +} + +// Name implements the Handler interface. +func (Transfer) Name() string { return "transfer" } diff --git a/plugin/transfer/transfer_test.go b/plugin/transfer/transfer_test.go new file mode 100644 index 00000000000..8dce4c6e132 --- /dev/null +++ b/plugin/transfer/transfer_test.go @@ -0,0 +1,291 @@ +package transfer + +import ( + "context" + "fmt" + "testing" + + "github.com/coredns/coredns/plugin" + "github.com/coredns/coredns/plugin/pkg/dnstest" + "github.com/coredns/coredns/plugin/test" + + "github.com/miekg/dns" +) + +// transfererPlugin implements transfer.Transferer and plugin.Handler +type transfererPlugin struct { + Zone string + Serial uint32 + Next plugin.Handler +} + +// Name implements plugin.Handler +func (transfererPlugin) Name() string { return "transfererplugin" } + +// ServeDNS implements plugin.Handler +func (p transfererPlugin) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (int, error) { + if r.Question[0].Name != p.Zone { + return p.Next.ServeDNS(ctx, w, r) + } + return 0, nil +} + +// Transfer implements transfer.Transferer - it returns a static AXFR response, or +// if serial is current, an abbreviated IXFR response +func (p transfererPlugin) Transfer(zone string, serial uint32) (<-chan []dns.RR, error) { + if zone != p.Zone { + return nil, ErrNotAuthoritative + } + ch := make(chan []dns.RR, 2) + defer close(ch) + ch 
<- []dns.RR{test.SOA(fmt.Sprintf("%s 100 IN SOA ns.dns.%s hostmaster.%s %d 7200 1800 86400 100", p.Zone, p.Zone, p.Zone, p.Serial))} + if serial >= p.Serial { + return ch, nil + } + ch <- []dns.RR{ + test.NS(fmt.Sprintf("%s 100 IN NS ns.dns.%s", p.Zone, p.Zone)), + test.A(fmt.Sprintf("ns.dns.%s 100 IN A 1.2.3.4", p.Zone)), + } + return ch, nil +} + +type terminatingPlugin struct{} + +// Name implements plugin.Handler +func (terminatingPlugin) Name() string { return "testplugin" } + +// ServeDNS implements plugin.Handler that returns NXDOMAIN for all requests +func (terminatingPlugin) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (int, error) { + m := new(dns.Msg) + m.SetRcode(r, dns.RcodeNameError) + w.WriteMsg(m) + return dns.RcodeNameError, nil +} + +func newTestTransfer() Transfer { + nextPlugin1 := transfererPlugin{Zone: "example.com.", Serial: 12345} + nextPlugin2 := transfererPlugin{Zone: "example.org.", Serial: 12345} + nextPlugin2.Next = terminatingPlugin{} + nextPlugin1.Next = nextPlugin2 + + transfer := Transfer{ + Transferers: []Transferer{nextPlugin1, nextPlugin2}, + xfrs: []*xfr{ + { + Zones: []string{"example.org."}, + to: []string{"*"}, + }, + { + Zones: []string{"example.com."}, + to: []string{"*"}, + }, + }, + Next: nextPlugin1, + } + return transfer +} + +func TestTransferNonZone(t *testing.T) { + + transfer := newTestTransfer() + ctx := context.TODO() + + for _, tc := range []string{"sub.example.org.", "example.test."} { + w := dnstest.NewRecorder(&test.ResponseWriter{}) + dnsmsg := &dns.Msg{} + dnsmsg.SetAxfr(tc) + + _, err := transfer.ServeDNS(ctx, w, dnsmsg) + if err != nil { + t.Error(err) + } + + if w.Msg == nil { + t.Fatalf("Got nil message for AXFR %s", tc) + } + + if w.Msg.Rcode != dns.RcodeNameError { + t.Errorf("Expected NXDOMAIN for AXFR %s got %s", tc, dns.RcodeToString[w.Msg.Rcode]) + } + } +} + +func TestTransferNotAXFRorIXFR(t *testing.T) { + + transfer := newTestTransfer() + + ctx := context.TODO() + w := 
dnstest.NewRecorder(&test.ResponseWriter{}) + dnsmsg := &dns.Msg{} + dnsmsg.SetQuestion("test.domain.", dns.TypeA) + + _, err := transfer.ServeDNS(ctx, w, dnsmsg) + if err != nil { + t.Error(err) + } + + if w.Msg == nil { + t.Fatal("Got nil message") + } + + if w.Msg.Rcode != dns.RcodeNameError { + t.Errorf("Expected NXDOMAIN got %s", dns.RcodeToString[w.Msg.Rcode]) + } +} + +func TestTransferAXFRExampleOrg(t *testing.T) { + + transfer := newTestTransfer() + + ctx := context.TODO() + w := dnstest.NewMultiRecorder(&test.ResponseWriter{}) + dnsmsg := &dns.Msg{} + dnsmsg.SetAxfr(transfer.xfrs[0].Zones[0]) + + _, err := transfer.ServeDNS(ctx, w, dnsmsg) + if err != nil { + t.Error(err) + } + + validateAXFRResponse(t, w) +} + +func TestTransferAXFRExampleCom(t *testing.T) { + + transfer := newTestTransfer() + + ctx := context.TODO() + w := dnstest.NewMultiRecorder(&test.ResponseWriter{}) + dnsmsg := &dns.Msg{} + dnsmsg.SetAxfr(transfer.xfrs[1].Zones[0]) + + _, err := transfer.ServeDNS(ctx, w, dnsmsg) + if err != nil { + t.Error(err) + } + + validateAXFRResponse(t, w) +} + +func TestTransferIXFRFallback(t *testing.T) { + + transfer := newTestTransfer() + + testPlugin := transfer.Transferers[0].(transfererPlugin) + + ctx := context.TODO() + w := dnstest.NewMultiRecorder(&test.ResponseWriter{}) + dnsmsg := &dns.Msg{} + dnsmsg.SetIxfr( + transfer.xfrs[0].Zones[0], + testPlugin.Serial-1, + "ns.dns."+testPlugin.Zone, + "hostmaster.dns."+testPlugin.Zone, + ) + + _, err := transfer.ServeDNS(ctx, w, dnsmsg) + if err != nil { + t.Error(err) + } + + validateAXFRResponse(t, w) +} + +func TestTransferIXFRCurrent(t *testing.T) { + + transfer := newTestTransfer() + + testPlugin := transfer.Transferers[0].(transfererPlugin) + + ctx := context.TODO() + w := dnstest.NewMultiRecorder(&test.ResponseWriter{}) + dnsmsg := &dns.Msg{} + dnsmsg.SetIxfr( + transfer.xfrs[0].Zones[0], + testPlugin.Serial, + "ns.dns."+testPlugin.Zone, + "hostmaster.dns."+testPlugin.Zone, + ) + + _, err := 
transfer.ServeDNS(ctx, w, dnsmsg) + if err != nil { + t.Error(err) + } + + if len(w.Msgs) == 0 { + t.Logf("%+v\n", w) + t.Fatal("Did not get back a zone response") + } + + if len(w.Msgs[0].Answer) != 1 { + t.Logf("%+v\n", w) + t.Fatalf("Expected 1 answer, got %d", len(w.Msgs[0].Answer)) + } + + // Ensure the answer is the SOA + if w.Msgs[0].Answer[0].Header().Rrtype != dns.TypeSOA { + t.Error("Answer does not contain the SOA record") + } +} + +func validateAXFRResponse(t *testing.T, w *dnstest.MultiRecorder) { + if len(w.Msgs) == 0 { + t.Logf("%+v\n", w) + t.Fatal("Did not get back a zone response") + } + + if len(w.Msgs[0].Answer) == 0 { + t.Logf("%+v\n", w) + t.Fatal("Did not get back an answer") + } + + // Ensure the answer starts with SOA + if w.Msgs[0].Answer[0].Header().Rrtype != dns.TypeSOA { + t.Error("Answer does not start with SOA record") + } + + // Ensure the answer ends with SOA + if w.Msgs[len(w.Msgs)-1].Answer[len(w.Msgs[len(w.Msgs)-1].Answer)-1].Header().Rrtype != dns.TypeSOA { + t.Error("Answer does not end with SOA record") + } + + // Ensure the answer is the expected length + c := 0 + for _, m := range w.Msgs { + c += len(m.Answer) + } + if c != 4 { + t.Errorf("Answer is not the expected length (expected 4, got %d)", c) + } +} + +func TestTransferNotAllowed(t *testing.T) { + nextPlugin := transfererPlugin{Zone: "example.org.", Serial: 12345} + + transfer := Transfer{ + Transferers: []Transferer{nextPlugin}, + xfrs: []*xfr{ + { + Zones: []string{"example.org."}, + to: []string{"1.2.3.4"}, + }, + }, + Next: nextPlugin, + } + + ctx := context.TODO() + w := dnstest.NewMultiRecorder(&test.ResponseWriter{}) + dnsmsg := &dns.Msg{} + dnsmsg.SetAxfr(transfer.xfrs[0].Zones[0]) + + rcode, err := transfer.ServeDNS(ctx, w, dnsmsg) + + if err != nil { + t.Error(err) + } + + if rcode != dns.RcodeRefused { + t.Errorf("Expected REFUSED response code, got %s", dns.RcodeToString[rcode]) + } + +} From 655e959685dade8e6b1f5cb94b447448fb1d0b16 Mon Sep 17 00:00:00 2001 
From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 4 Nov 2019 05:48:34 -0800 Subject: [PATCH 255/324] build(deps): bump google.golang.org/api from 0.11.0 to 0.13.0 (#3416) Bumps [google.golang.org/api](https://github.com/google/google-api-go-client) from 0.11.0 to 0.13.0. - [Release notes](https://github.com/google/google-api-go-client/releases) - [Changelog](https://github.com/googleapis/google-api-go-client/blob/master/CHANGES.md) - [Commits](https://github.com/google/google-api-go-client/compare/v0.11.0...v0.13.0) Signed-off-by: dependabot-preview[bot] --- go.mod | 2 +- go.sum | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index 33af74e4395..c8e31ad54be 100644 --- a/go.mod +++ b/go.mod @@ -43,7 +43,7 @@ require ( golang.org/x/crypto v0.0.0-20191002192127-34f69633bfdc golang.org/x/net v0.0.0-20191003171128-d98b1b443823 // indirect golang.org/x/sys v0.0.0-20191003212358-c178f38b412c - google.golang.org/api v0.11.0 + google.golang.org/api v0.13.0 google.golang.org/genproto v0.0.0-20190701230453-710ae3a149df // indirect google.golang.org/grpc v1.24.0 gopkg.in/DataDog/dd-trace-go.v1 v1.18.0 diff --git a/go.sum b/go.sum index 3af290daac3..4cd17be0265 100644 --- a/go.sum +++ b/go.sum @@ -453,6 +453,8 @@ google.golang.org/api v0.10.0 h1:7tmAxx3oKE98VMZ+SBZzvYYWRQ9HODBxmC8mXUsraSQ= google.golang.org/api v0.10.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.11.0 h1:n/qM3q0/rV2F0pox7o0CvNhlPvZAo7pLbef122cbLJ0= google.golang.org/api v0.11.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.13.0 h1:Q3Ui3V3/CVinFWFiW39Iw0kMuVrRzYX0wN6OPFp0lTA= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod 
h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= From 86680793dc1f412bb51bf5b220508050ec2f1192 Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 4 Nov 2019 06:09:15 -0800 Subject: [PATCH 256/324] build(deps): bump gopkg.in/DataDog/dd-trace-go.v1 from 1.18.0 to 1.19.0 (#3418) Bumps [gopkg.in/DataDog/dd-trace-go.v1](https://github.com/DataDog/dd-trace-go) from 1.18.0 to 1.19.0. - [Release notes](https://github.com/DataDog/dd-trace-go/releases) - [Commits](https://github.com/DataDog/dd-trace-go/compare/v1.18.0...v1.19.0) Signed-off-by: dependabot-preview[bot] --- go.mod | 2 +- go.sum | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index c8e31ad54be..92e9e3e9a10 100644 --- a/go.mod +++ b/go.mod @@ -46,7 +46,7 @@ require ( google.golang.org/api v0.13.0 google.golang.org/genproto v0.0.0-20190701230453-710ae3a149df // indirect google.golang.org/grpc v1.24.0 - gopkg.in/DataDog/dd-trace-go.v1 v1.18.0 + gopkg.in/DataDog/dd-trace-go.v1 v1.19.0 gopkg.in/inf.v0 v0.9.1 // indirect k8s.io/api v0.0.0-20190620084959-7cf5895f2711 k8s.io/apimachinery v0.0.0-20190612205821-1799e75a0719 diff --git a/go.sum b/go.sum index 4cd17be0265..f03750f9afe 100644 --- a/go.sum +++ b/go.sum @@ -477,6 +477,8 @@ google.golang.org/grpc v1.24.0 h1:vb/1TCsVn3DcJlQ0Gs1yB1pKI6Do2/QNwxdKqmc/b0s= google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= gopkg.in/DataDog/dd-trace-go.v1 v1.18.0 h1:n0pmVs3TxCRC9Jrd1/aNaB8roW5ZHMw0UOWPxAoUkkI= gopkg.in/DataDog/dd-trace-go.v1 v1.18.0/go.mod h1:DVp8HmDh8PuTu2Z0fVVlBsyWaC++fzwVCaGWylTe3tg= +gopkg.in/DataDog/dd-trace-go.v1 v1.19.0 h1:aFSFd6oDMdvPYiToGqTv7/ERA6QrPhGaXSuueRCaM88= +gopkg.in/DataDog/dd-trace-go.v1 v1.19.0/go.mod h1:DVp8HmDh8PuTu2Z0fVVlBsyWaC++fzwVCaGWylTe3tg= gopkg.in/alecthomas/kingpin.v2 
v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= From a214161bc3bdb7b3f805c11d6ffe1fe27c613b3e Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Tue, 5 Nov 2019 11:18:02 +0000 Subject: [PATCH 257/324] Update the notes for the 1.6.5 release. (#3424) * Update the notes for the 1.6.5 release. Signed-off-by: Miek Gieben * manpages: regenerate Signed-off-by: Miek Gieben * Fix date Signed-off-by: Miek Gieben * Go gen/go build Signed-off-by: Miek Gieben --- go.mod | 4 ++-- go.sum | 4 ++++ man/coredns-autopath.7 | 4 ++-- man/coredns-forward.7 | 2 +- man/coredns-transfer.7 | 42 ++++++++++++++++++++++++++++++++++++++++++ notes/coredns-1.6.5.md | 16 ++++++++++++---- 6 files changed, 63 insertions(+), 9 deletions(-) create mode 100644 man/coredns-transfer.7 diff --git a/go.mod b/go.mod index 92e9e3e9a10..2600b094313 100644 --- a/go.mod +++ b/go.mod @@ -34,7 +34,7 @@ require ( github.com/openzipkin-contrib/zipkin-go-opentracing v0.3.5 github.com/philhofer/fwd v1.0.0 // indirect github.com/prometheus/client_golang v1.2.1 - github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 + github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 github.com/prometheus/common v0.7.0 github.com/spf13/pflag v1.0.3 // indirect github.com/tinylib/msgp v1.1.0 // indirect @@ -42,7 +42,7 @@ require ( go.etcd.io/etcd v0.5.0-alpha.5.0.20190917205325-a14579fbfb1a golang.org/x/crypto v0.0.0-20191002192127-34f69633bfdc golang.org/x/net v0.0.0-20191003171128-d98b1b443823 // indirect - golang.org/x/sys v0.0.0-20191003212358-c178f38b412c + golang.org/x/sys v0.0.0-20191010194322-b09406accb47 google.golang.org/api v0.13.0 google.golang.org/genproto v0.0.0-20190701230453-710ae3a149df // indirect 
google.golang.org/grpc v1.24.0 diff --git a/go.sum b/go.sum index f03750f9afe..96169ce7580 100644 --- a/go.sum +++ b/go.sum @@ -72,6 +72,7 @@ github.com/caddyserver/caddy v1.0.3 h1:i9gRhBgvc5ifchwWtSe7pDpsdS9+Q0Rw9oYQmYUTw github.com/caddyserver/caddy v1.0.3/go.mod h1:G+ouvOY32gENkJC+jhgl62TyhvqEsFaDiZ4uw0RzP1E= github.com/cenkalti/backoff v2.1.1+incompatible h1:tKJnvO2kl0zmb/jA5UKAt4VoEVw1qxKWjE/Bpp46npY= github.com/cenkalti/backoff v2.1.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/cespare/xxhash/v2 v2.1.0 h1:yTUvW7Vhb89inJ+8irsUqiWjh8iT6sQPZiQzI6ReGkA= github.com/cespare/xxhash/v2 v2.1.0/go.mod h1:dgIUBU3pDso/gPgZ1osOZ0iQf77oPR28Tjxl5dIMyVM= github.com/cheekybits/genny v0.0.0-20170328200008-9127e812e1e9/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= @@ -284,6 +285,7 @@ github.com/prometheus/client_golang v1.2.1/go.mod h1:XMU6Z2MjaRKVu/dC1qupJI9SiNk github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/common v0.4.1 h1:K0MGApIoQvMw27RTdJkPbr3JZ7DNbtxQNyi5STVM6Kw= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= @@ -296,6 +298,7 @@ github.com/prometheus/procfs v0.0.2 
h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNG github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.3 h1:CTwfnzjQ+8dS6MhHHu4YswVAD99sL2wjPqP+VkURmKE= github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= +github.com/prometheus/procfs v0.0.5 h1:3+auTFlqw+ZaQYJARz6ArODtkaIwtvBTx3N2NehQlL8= github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhDYGoxY8uLVpewe1GDZ2vu2Tr/vTdVAkFQ= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= @@ -420,6 +423,7 @@ golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe h1:6fAMxZRR6sl1Uq8U61gxU+kPT golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191003212358-c178f38b412c h1:6Zx7DRlKXf79yfxuQ/7GqV3w2y7aDsk6bGg0MzF5RVU= golang.org/x/sys v0.0.0-20191003212358-c178f38b412c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191010194322-b09406accb47 h1:/XfQ9z7ib8eEJX2hdgFTZJ/ntt0swNk5oYBziWeTCvY= golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/man/coredns-autopath.7 b/man/coredns-autopath.7 index ce5711bb833..e493dc4514f 100644 --- a/man/coredns-autopath.7 +++ b/man/coredns-autopath.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-AUTOPATH" 7 "October 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-AUTOPATH" 7 "November 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP @@ -56,7 +56,7 @@ 
autopath my\-resolv.conf .RE .PP -Use \fB\fCmy-resolv.conf\fR as the file to get the search path from. This file only needs so have one line: +Use \fB\fCmy-resolv.conf\fR as the file to get the search path from. This file only needs to have one line: \fB\fCsearch domain1 domain2 ...\fR .PP diff --git a/man/coredns-forward.7 b/man/coredns-forward.7 index 3bb51dc4b74..894fe713873 100644 --- a/man/coredns-forward.7 +++ b/man/coredns-forward.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-FORWARD" 7 "October 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-FORWARD" 7 "November 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-transfer.7 b/man/coredns-transfer.7 new file mode 100644 index 00000000000..fd9e0e7ef87 --- /dev/null +++ b/man/coredns-transfer.7 @@ -0,0 +1,42 @@ +.\" Generated by Mmark Markdown Processer - mmark.miek.nl +.TH "COREDNS-TRANSFER" 7 "November 2019" "CoreDNS" "CoreDNS Plugins" + +.SH "NAME" +.PP +\fItransfer\fP - answer zone transfers requests for compatible authoritative +plugins. + +.SH "DESCRIPTION" +.PP +This plugin answers zone transfers for authoritative plugins that implement +\fB\fCtransfer.Transferer\fR. + +.PP +Transfer answers AXFR requests and IXFR requests with AXFR fallback if the +zone has changed. + +.PP +Notifies are not currently supported. + +.SH "SYNTAX" +.PP +.RS + +.nf +transfer [ZONE...] { + to HOST... +} + +.fi +.RE + +.IP \(bu 4 +\fBZONES\fP The zones \fItransfer\fP will answer zone requests for. If left blank, +the zones are inherited from the enclosing server block. To answer zone +transfers for a given zone, there must be another plugin in the same server +block that serves the same zone, and implements \fB\fCtransfer.Transferer\fR. +.IP \(bu 4 +\fB\fCto\fR \fBHOST...\fP The hosts \fItransfer\fP will transfer to. Use \fB\fC*\fR to permit +transfers to all hosts. 
+ + diff --git a/notes/coredns-1.6.5.md b/notes/coredns-1.6.5.md index 479befc719a..bb426a99cc2 100644 --- a/notes/coredns-1.6.5.md +++ b/notes/coredns-1.6.5.md @@ -3,7 +3,7 @@ title = "CoreDNS-1.6.5 Release" description = "CoreDNS-1.6.5 Release Notes." tags = ["Release", "1.6.5", "Notes"] release = "1.6.5" -date = 2019-10-13T10:00:00+00:00 +date = 2019-11-06T10:00:00+00:00 author = "coredns" +++ @@ -14,14 +14,19 @@ A fairly small release that polishes various plugins and fixes a bunch of bugs. # Plugins +A new plugin [*transfer*](/plugins/transfer) that encapsulates the zone transfer knowledge and code +in one place. This makes it easier to add this functionality to new plugins. Plugins that already +implement zone transfers are expected to move to it in the 1.7.0 release. + * [*forward*](/plugins/forward) don't block on returning sockets; instead timeout and drop the - socket on the floor, this makes each go-routine guarantee to exit . + socket on the floor, this makes each go-routine guarantee to exit. * [*kubernetes*](/plugins/kubernetes) adds metrics to measure kubernetes control plane latency, see documentation for details. -* [*file*](/plugins/file) fixes a panic when comparing domains names +* [*file*](/plugins/file) fixes a panic when comparing domains names. ## Brought to You By +Chris O'Haver, Erfan Besharat, Hauke Löffler, Ingo Gottwald, @@ -34,6 +39,7 @@ yuxiaobo96. ## Noteworthy Changes * core: Make request.Request smaller (https://github.com/coredns/coredns/pull/3351) +* pkg/log: Add Clear to stop debug logging (https://github.com/coredns/coredns/pull/3372) * plugin/cache: move goroutine closure to separate function to save memory (https://github.com/coredns/coredns/pull/3353) * plugin/clouddns: remove initialization from init (https://github.com/coredns/coredns/pull/3349) * plugin/erratic: doc and zone transfer (https://github.com/coredns/coredns/pull/3340) @@ -41,4 +47,6 @@ yuxiaobo96. 
* plugin/forward: make Yield not block (https://github.com/coredns/coredns/pull/3336) * plugin/forward: Move map to array (https://github.com/coredns/coredns/pull/3339) * plugin/kubernetes: Measure and expose DNS programming latency from Kubernetes plugin. (https://github.com/coredns/coredns/pull/3171) -* plugin/route53: remove amazon intialization from init (https://github.com/coredns/coredns/pull/3348) +* plugin/route53: Remove amazon initialization from init (https://github.com/coredns/coredns/pull/3348) +* plugin/transfer: Zone transfer plugin (https://github.com/coredns/coredns/pull/3223) +* plugins: Add MustNormalize (https://github.com/coredns/coredns/pull/3385) From c2fd1b2467249d8b1bb6bbee72ad83b63dd9087f Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Tue, 5 Nov 2019 13:46:54 +0000 Subject: [PATCH 258/324] dump version to 1.6.5 (#3425) Signed-off-by: Miek Gieben --- coremain/version.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/coremain/version.go b/coremain/version.go index 1e7766cd093..53bc9714b86 100644 --- a/coremain/version.go +++ b/coremain/version.go @@ -2,7 +2,7 @@ package coremain // Various CoreDNS constants. const ( - CoreVersion = "1.6.4" + CoreVersion = "1.6.5" coreName = "CoreDNS" serverType = "dns" ) From 5ecd4ec57a70cbf989dd39435d84afc3cfefbacc Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Tue, 5 Nov 2019 15:57:22 +0000 Subject: [PATCH 259/324] transfer: small doc update (#3426) Tweak the README a little. Name and spell out axfr/ixfr. Signed-off-by: Miek Gieben --- plugin/transfer/README.md | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/plugin/transfer/README.md b/plugin/transfer/README.md index df3ab3eb42a..45ec60c994e 100644 --- a/plugin/transfer/README.md +++ b/plugin/transfer/README.md @@ -2,16 +2,15 @@ ## Name -*transfer* - answer zone transfers requests for compatible authoritative -plugins. 
+*transfer* - perform zone transfers for other plugins. ## Description This plugin answers zone transfers for authoritative plugins that implement `transfer.Transferer`. -Transfer answers AXFR requests and IXFR requests with AXFR fallback if the -zone has changed. +Transfer answers full zone transfer (AXFR) requests and incremental zone transfer (IXFR) requests +with AXFR fallback if the zone has changed. Notifies are not currently supported. From 21fe65ed8dc67f38253594ac33670f85627d6f47 Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Wed, 6 Nov 2019 08:42:33 -0800 Subject: [PATCH 260/324] build(deps): bump github.com/aws/aws-sdk-go from 1.25.19 to 1.25.25 (#3429) Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.25.19 to 1.25.25. - [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Changelog](https://github.com/aws/aws-sdk-go/blob/master/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.25.19...v1.25.25) Signed-off-by: dependabot-preview[bot] --- go.mod | 2 +- go.sum | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index 2600b094313..d5157b32258 100644 --- a/go.mod +++ b/go.mod @@ -11,7 +11,7 @@ require ( github.com/DataDog/datadog-go v2.2.0+incompatible // indirect github.com/Shopify/sarama v1.21.0 // indirect github.com/apache/thrift v0.12.0 // indirect - github.com/aws/aws-sdk-go v1.25.19 + github.com/aws/aws-sdk-go v1.25.25 github.com/caddyserver/caddy v1.0.3 github.com/coredns/federation v0.0.0-20190818181423-e032b096babe github.com/coreos/go-systemd v0.0.0-20190212144455-93d5ec2c7f76 // indirect diff --git a/go.sum b/go.sum index 96169ce7580..28fd7055f00 100644 --- a/go.sum +++ b/go.sum @@ -62,6 +62,8 @@ github.com/aws/aws-sdk-go v1.25.16 
h1:k7Fy6T/uNuLX6zuayU/TJoP7yMgGcJSkZpF7QVjwYp github.com/aws/aws-sdk-go v1.25.16/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.25.19 h1:sp3xP91qIAVhWufyn9qM6Zhhn6kX06WJQcmhRj7QTXc= github.com/aws/aws-sdk-go v1.25.19/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.25.25 h1:j3HLOqcDWjNox1DyvJRs+kVQF42Ghtv6oL6cVBfXS3U= +github.com/aws/aws-sdk-go v1.25.25/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= From 1942746c484fc736c933678cf0e37dcb2edfe58d Mon Sep 17 00:00:00 2001 From: Miciah Dashiel Butler Masters Date: Wed, 6 Nov 2019 16:27:55 -0500 Subject: [PATCH 261/324] plugin/reload: Fix "durations" documentation link (#3431) * plugin/reload/README.md: Fix the syntax of the link to the Go documentation for duration values. Signed-off-by: Miciah Dashiel Butler Masters --- plugin/reload/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugin/reload/README.md b/plugin/reload/README.md index 77e720eacc9..8e58d1e7367 100644 --- a/plugin/reload/README.md +++ b/plugin/reload/README.md @@ -40,7 +40,7 @@ reload [INTERVAL] [JITTER] The plugin will check for changes every **INTERVAL**, subject to +/- the **JITTER** duration. -* **INTERVAL** and **JITTER** are Golang (durations)[[https://golang.org/pkg/time/#ParseDuration](https://golang.org/pkg/time/#ParseDuration)]. +* **INTERVAL** and **JITTER** are Golang [durations](https://golang.org/pkg/time/#ParseDuration). The default **INTERVAL** is 30s, default **JITTER** is 15s, the minimal value for **INTERVAL** is 2s, and for **JITTER** it is 1s. 
If **JITTER** is more than half of **INTERVAL**, it will be set to half of **INTERVAL** From 113783ed91c85eb70455b4065b74ddc470b41c55 Mon Sep 17 00:00:00 2001 From: Guangming Wang Date: Thu, 7 Nov 2019 23:29:50 +0800 Subject: [PATCH 262/324] Call wg.Add in main goroutine to avoid race conditons. (#3433) Signed-off-by: Guangming Wang --- plugin/transfer/transfer.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugin/transfer/transfer.go b/plugin/transfer/transfer.go index 9f86915485b..0bf92ac47a2 100644 --- a/plugin/transfer/transfer.go +++ b/plugin/transfer/transfer.go @@ -109,8 +109,8 @@ func (t Transfer) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg ch := make(chan *dns.Envelope) tr := new(dns.Transfer) wg := new(sync.WaitGroup) + wg.Add(1) go func() { - wg.Add(1) tr.Out(w, r, ch) wg.Done() }() From e23a34abb439c8169f90a2fc431e0ba0019d6a50 Mon Sep 17 00:00:00 2001 From: Kohei Yoshida <14937183+ykhr53@users.noreply.github.com> Date: Sun, 10 Nov 2019 08:10:12 +0000 Subject: [PATCH 263/324] Add bufsize plugin for preparing the DNS Flag Day and avoiding IP fragmentation (#3401) * add bufsize plugin Signed-off-by: ykhr53 * add docstring and comment Signed-off-by: ykhr53 * delete stdout messages when get an error Signed-off-by: ykhr53 * change to context.Background from TODO Signed-off-by: ykhr53 * define default bufsize as defaultBufSize constant Signed-off-by: ykhr53 * fix some comments Signed-off-by: ykhr53 * function name change: parse Signed-off-by: ykhr53 * function name change: parse Signed-off-by: ykhr53 --- core/dnsserver/zdirectives.go | 1 + core/plugin/zplugin.go | 1 + plugin.cfg | 1 + plugin/bufsize/README.md | 30 ++++++++++++++ plugin/bufsize/bufsize.go | 31 +++++++++++++++ plugin/bufsize/bufsize_test.go | 72 ++++++++++++++++++++++++++++++++++ plugin/bufsize/setup.go | 52 ++++++++++++++++++++++++ plugin/bufsize/setup_test.go | 46 ++++++++++++++++++++++ 8 files changed, 234 insertions(+) create mode 100644 
plugin/bufsize/README.md create mode 100644 plugin/bufsize/bufsize.go create mode 100644 plugin/bufsize/bufsize_test.go create mode 100644 plugin/bufsize/setup.go create mode 100644 plugin/bufsize/setup_test.go diff --git a/core/dnsserver/zdirectives.go b/core/dnsserver/zdirectives.go index 8e2153aadc8..61d96c6339f 100644 --- a/core/dnsserver/zdirectives.go +++ b/core/dnsserver/zdirectives.go @@ -15,6 +15,7 @@ var Directives = []string{ "tls", "reload", "nsid", + "bufsize", "root", "bind", "debug", diff --git a/core/plugin/zplugin.go b/core/plugin/zplugin.go index 79892235f8e..90267f29bc4 100644 --- a/core/plugin/zplugin.go +++ b/core/plugin/zplugin.go @@ -11,6 +11,7 @@ import ( _ "github.com/coredns/coredns/plugin/autopath" _ "github.com/coredns/coredns/plugin/azure" _ "github.com/coredns/coredns/plugin/bind" + _ "github.com/coredns/coredns/plugin/bufsize" _ "github.com/coredns/coredns/plugin/cache" _ "github.com/coredns/coredns/plugin/cancel" _ "github.com/coredns/coredns/plugin/chaos" diff --git a/plugin.cfg b/plugin.cfg index 47b9ecb517a..d40cf7a33cd 100644 --- a/plugin.cfg +++ b/plugin.cfg @@ -24,6 +24,7 @@ cancel:cancel tls:tls reload:reload nsid:nsid +bufsize:bufsize root:root bind:bind debug:debug diff --git a/plugin/bufsize/README.md b/plugin/bufsize/README.md new file mode 100644 index 00000000000..bd1d232d10f --- /dev/null +++ b/plugin/bufsize/README.md @@ -0,0 +1,30 @@ +# bufsize +## Name +*bufsize* - sizes EDNS0 buffer size to prevent IP fragmentation. + +## Description +*bufsize* limits a requester's UDP payload size. +It prevents IP fragmentation so that to deal with DNS vulnerability. + +## Syntax +```txt +bufsize [SIZE] +``` + +**[SIZE]** is an int value for setting the buffer size. +The default value is 512, and the value must be within 512 - 4096. +Only one argument is acceptable, and it covers both IPv4 and IPv6. + +## Examples +```corefile +. { + bufsize 512 + forward . 
172.31.0.10 + log +} +``` + +If you run a resolver on 172.31.0.10, the buffer size of incoming query on the resolver will be set to 512 bytes. + +## Considerations +For now, if a client does not use EDNS, this plugin adds OPT RR. \ No newline at end of file diff --git a/plugin/bufsize/bufsize.go b/plugin/bufsize/bufsize.go new file mode 100644 index 00000000000..1522be8948b --- /dev/null +++ b/plugin/bufsize/bufsize.go @@ -0,0 +1,31 @@ +// Package bufsize implements a plugin that modifies EDNS0 buffer size. +package bufsize + +import ( + "context" + + "github.com/coredns/coredns/plugin" + + "github.com/miekg/dns" +) + +// Bufsize implements bufsize plugin. +type Bufsize struct { + Next plugin.Handler + Size int +} + +// ServeDNS implements the plugin.Handler interface. +func (buf Bufsize) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (int, error) { + if option := r.IsEdns0(); option != nil { + option.SetUDPSize(uint16(buf.Size)) + } else { + // If a client does not use EDNS, add it + r.SetEdns0(uint16(buf.Size), false) + } + + return plugin.NextOrFailure(buf.Name(), buf.Next, ctx, w, r) +} + +// Name implements the Handler interface. 
+func (buf Bufsize) Name() string { return "bufsize" } diff --git a/plugin/bufsize/bufsize_test.go b/plugin/bufsize/bufsize_test.go new file mode 100644 index 00000000000..3d714d2f194 --- /dev/null +++ b/plugin/bufsize/bufsize_test.go @@ -0,0 +1,72 @@ +package bufsize + +import ( + "context" + "testing" + + "github.com/coredns/coredns/plugin" + "github.com/coredns/coredns/plugin/test" + "github.com/coredns/coredns/plugin/whoami" + + "github.com/miekg/dns" +) + +func TestBufsize(t *testing.T) { + em := Bufsize{ + Size: 512, + } + + tests := []struct { + next plugin.Handler + qname string + inputBufsize uint16 + outgoingBufsize uint16 + expectedErr error + }{ + // This plugin is responsible for limiting outgoing query's bufize + { + next: whoami.Whoami{}, + qname: ".", + inputBufsize: 1200, + outgoingBufsize: 512, + expectedErr: nil, + }, + // If EDNS is not enabled, this plugin adds it + { + next: whoami.Whoami{}, + qname: ".", + outgoingBufsize: 512, + expectedErr: nil, + }, + } + + for i, tc := range tests { + req := new(dns.Msg) + req.SetQuestion(dns.Fqdn(tc.qname), dns.TypeA) + req.Question[0].Qclass = dns.ClassINET + em.Next = tc.next + + if tc.inputBufsize != 0 { + req.SetEdns0(tc.inputBufsize, false) + } + + _, err := em.ServeDNS(context.Background(), &test.ResponseWriter{}, req) + + if err != tc.expectedErr { + t.Errorf("Test %d: Expected error is %v, but got %v", i, tc.expectedErr, err) + } + + if tc.outgoingBufsize != 0 { + for _, extra := range req.Extra { + if option, ok := extra.(*dns.OPT); ok { + b := option.UDPSize() + if b != tc.outgoingBufsize { + t.Errorf("Test %d: Expected outgoing bufsize is %d, but got %d", i, tc.outgoingBufsize, b) + } + } else { + t.Errorf("Test %d: Not found OPT RR.", i) + } + } + } + } +} diff --git a/plugin/bufsize/setup.go b/plugin/bufsize/setup.go new file mode 100644 index 00000000000..28586c76ebc --- /dev/null +++ b/plugin/bufsize/setup.go @@ -0,0 +1,52 @@ +package bufsize + +import ( + 
"strconv" + + "github.com/coredns/coredns/core/dnsserver" + "github.com/coredns/coredns/plugin" + + "github.com/caddyserver/caddy" +) + +func init() { plugin.Register("bufsize", setup) } + +func setup(c *caddy.Controller) error { + bufsize, err := parse(c) + if err != nil { + return plugin.Error("bufsize", err) + } + + dnsserver.GetConfig(c).AddPlugin(func(next plugin.Handler) plugin.Handler { + return Bufsize{Next: next, Size: bufsize} + }) + + return nil +} + +func parse(c *caddy.Controller) (int, error) { + const defaultBufSize = 512 + for c.Next() { + args := c.RemainingArgs() + switch len(args) { + case 0: + // Nothing specified; use 512 as default + return defaultBufSize, nil + case 1: + // Specified value is needed to verify + bufsize, err := strconv.Atoi(args[0]) + if err != nil { + return -1, plugin.Error("bufsize", c.ArgErr()) + } + // Follows RFC 6891 + if bufsize < 512 || bufsize > 4096 { + return -1, plugin.Error("bufsize", c.ArgErr()) + } + return bufsize, nil + default: + // Only 1 argument is acceptable + return -1, plugin.Error("bufsize", c.ArgErr()) + } + } + return -1, plugin.Error("bufsize", c.ArgErr()) +} diff --git a/plugin/bufsize/setup_test.go b/plugin/bufsize/setup_test.go new file mode 100644 index 00000000000..4d1705a0533 --- /dev/null +++ b/plugin/bufsize/setup_test.go @@ -0,0 +1,46 @@ +package bufsize + +import ( + "strings" + "testing" + + "github.com/caddyserver/caddy" +) + +func TestSetupBufsize(t *testing.T) { + tests := []struct { + input string + shouldErr bool + expectedData int + expectedErrContent string // substring from the expected error. Empty for positive cases. 
+ }{ + {`bufsize`, false, 512, ""}, + {`bufsize "1232"`, false, 1232, ""}, + {`bufsize "5000"`, true, -1, "plugin"}, + {`bufsize "512 512"`, true, -1, "plugin"}, + {`bufsize "abc123"`, true, -1, "plugin"}, + } + + for i, test := range tests { + c := caddy.NewTestController("dns", test.input) + bufsize, err := parse(c) + + if test.shouldErr && err == nil { + t.Errorf("Test %d: Expected error but found %s for input %s", i, err, test.input) + } + + if err != nil { + if !test.shouldErr { + t.Errorf("Test %d: Error found for input %s. Error: %v", i, test.input, err) + } + + if !strings.Contains(err.Error(), test.expectedErrContent) { + t.Errorf("Test %d: Expected error to contain: %v, found error: %v, input: %s", i, test.expectedErrContent, err, test.input) + } + } + + if !test.shouldErr && bufsize != test.expectedData { + t.Errorf("Test %d: Bufsize not correctly set for input %s. Expected: %d, actual: %d", i, test.input, test.expectedData, bufsize) + } + } +} From 591942a99ca061bc4694770f00576bc1d45b2070 Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Tue, 12 Nov 2019 06:59:01 -0800 Subject: [PATCH 264/324] build(deps): bump github.com/aws/aws-sdk-go from 1.25.25 to 1.25.31 (#3438) Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.25.25 to 1.25.31. 
- [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Changelog](https://github.com/aws/aws-sdk-go/blob/master/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.25.25...v1.25.31) Signed-off-by: dependabot-preview[bot] --- go.mod | 2 +- go.sum | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index d5157b32258..a15df21204b 100644 --- a/go.mod +++ b/go.mod @@ -11,7 +11,7 @@ require ( github.com/DataDog/datadog-go v2.2.0+incompatible // indirect github.com/Shopify/sarama v1.21.0 // indirect github.com/apache/thrift v0.12.0 // indirect - github.com/aws/aws-sdk-go v1.25.25 + github.com/aws/aws-sdk-go v1.25.31 github.com/caddyserver/caddy v1.0.3 github.com/coredns/federation v0.0.0-20190818181423-e032b096babe github.com/coreos/go-systemd v0.0.0-20190212144455-93d5ec2c7f76 // indirect diff --git a/go.sum b/go.sum index 28fd7055f00..932c7e6379e 100644 --- a/go.sum +++ b/go.sum @@ -64,6 +64,8 @@ github.com/aws/aws-sdk-go v1.25.19 h1:sp3xP91qIAVhWufyn9qM6Zhhn6kX06WJQcmhRj7QTX github.com/aws/aws-sdk-go v1.25.19/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.25.25 h1:j3HLOqcDWjNox1DyvJRs+kVQF42Ghtv6oL6cVBfXS3U= github.com/aws/aws-sdk-go v1.25.25/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.25.31 h1:14mdh3HsTgRekePPkYcCbAaEXJknc3mN7f4XfsiMMDA= +github.com/aws/aws-sdk-go v1.25.31/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= From 032a4d9e52147f81682d3f671294654835bf17ae Mon Sep 17 00:00:00 2001 
From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Tue, 12 Nov 2019 06:59:19 -0800 Subject: [PATCH 265/324] build(deps): bump google.golang.org/grpc from 1.24.0 to 1.25.1 (#3439) Bumps [google.golang.org/grpc](https://github.com/grpc/grpc-go) from 1.24.0 to 1.25.1. - [Release notes](https://github.com/grpc/grpc-go/releases) - [Commits](https://github.com/grpc/grpc-go/compare/v1.24.0...v1.25.1) Signed-off-by: dependabot-preview[bot] --- go.mod | 2 +- go.sum | 8 ++++++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index a15df21204b..6693b59da8e 100644 --- a/go.mod +++ b/go.mod @@ -45,7 +45,7 @@ require ( golang.org/x/sys v0.0.0-20191010194322-b09406accb47 google.golang.org/api v0.13.0 google.golang.org/genproto v0.0.0-20190701230453-710ae3a149df // indirect - google.golang.org/grpc v1.24.0 + google.golang.org/grpc v1.25.1 gopkg.in/DataDog/dd-trace-go.v1 v1.19.0 gopkg.in/inf.v0 v0.9.1 // indirect k8s.io/api v0.0.0-20190620084959-7cf5895f2711 diff --git a/go.sum b/go.sum index 932c7e6379e..4b1bf4a0958 100644 --- a/go.sum +++ b/go.sum @@ -76,6 +76,7 @@ github.com/caddyserver/caddy v1.0.3 h1:i9gRhBgvc5ifchwWtSe7pDpsdS9+Q0Rw9oYQmYUTw github.com/caddyserver/caddy v1.0.3/go.mod h1:G+ouvOY32gENkJC+jhgl62TyhvqEsFaDiZ4uw0RzP1E= github.com/cenkalti/backoff v2.1.1+incompatible h1:tKJnvO2kl0zmb/jA5UKAt4VoEVw1qxKWjE/Bpp46npY= github.com/cenkalti/backoff v2.1.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.1.0 h1:yTUvW7Vhb89inJ+8irsUqiWjh8iT6sQPZiQzI6ReGkA= github.com/cespare/xxhash/v2 v2.1.0/go.mod h1:dgIUBU3pDso/gPgZ1osOZ0iQf77oPR28Tjxl5dIMyVM= github.com/cheekybits/genny v0.0.0-20170328200008-9127e812e1e9/go.mod 
h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ= @@ -114,6 +115,8 @@ github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1 github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v0.0.0-20190203023257-5858425f7550/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.1.0+incompatible h1:K1MDoo4AZ4wU0GIU/fPmtZg7VpzLjCxu+UwBD1FvwOc= github.com/evanphx/json-patch v4.1.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= @@ -477,12 +480,17 @@ google.golang.org/genproto v0.0.0-20190626174449-989357319d63 h1:UsSJe9fhWNSz6em google.golang.org/genproto v0.0.0-20190626174449-989357319d63/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= google.golang.org/genproto v0.0.0-20190701230453-710ae3a149df h1:k3DT34vxk64+4bD5x+fRy6U0SXxZehzUHRSYUJcKfII= google.golang.org/genproto v0.0.0-20190701230453-710ae3a149df/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc 
v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.24.0 h1:vb/1TCsVn3DcJlQ0Gs1yB1pKI6Do2/QNwxdKqmc/b0s= google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= +google.golang.org/grpc v1.25.1 h1:wdKvqQk7IttEw92GoRyKG2IDrUIpgpj6H6m81yfeMW0= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= gopkg.in/DataDog/dd-trace-go.v1 v1.18.0 h1:n0pmVs3TxCRC9Jrd1/aNaB8roW5ZHMw0UOWPxAoUkkI= gopkg.in/DataDog/dd-trace-go.v1 v1.18.0/go.mod h1:DVp8HmDh8PuTu2Z0fVVlBsyWaC++fzwVCaGWylTe3tg= gopkg.in/DataDog/dd-trace-go.v1 v1.19.0 h1:aFSFd6oDMdvPYiToGqTv7/ERA6QrPhGaXSuueRCaM88= From 1743ca3f026b25eb5b0311f134a5b25bb0b66321 Mon Sep 17 00:00:00 2001 From: Kohei Yoshida <14937183+ykhr53@users.noreply.github.com> Date: Wed, 13 Nov 2019 09:49:24 +0000 Subject: [PATCH 266/324] add OWNERS file (#3441) Signed-off-by: yukihira --- plugin/bufsize/OWNERS | 4 ++++ 1 file changed, 4 insertions(+) create mode 100644 plugin/bufsize/OWNERS diff --git a/plugin/bufsize/OWNERS b/plugin/bufsize/OWNERS new file mode 100644 index 00000000000..e40116eae1a --- /dev/null +++ b/plugin/bufsize/OWNERS @@ -0,0 +1,4 @@ +reviewers: + - ykhr53 +approvers: + - ykhr53 \ No newline at end of file From 8cc034eeceb766ffe966bdd29923f8325dd3f4e5 Mon Sep 17 00:00:00 2001 From: Kohei Yoshida <14937183+ykhr53@users.noreply.github.com> Date: Thu, 14 Nov 2019 07:37:45 +0000 Subject: [PATCH 267/324] plugin/bufsize: add usecase description (#3437) * add usecase Signed-off-by: yukihira * fix some comments Signed-off-by: ykhr53 --- plugin/bufsize/README.md | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/plugin/bufsize/README.md b/plugin/bufsize/README.md index bd1d232d10f..297941059e4 100644 --- a/plugin/bufsize/README.md +++ b/plugin/bufsize/README.md @@ -16,6 +16,7 @@ The default value is 512, and the value must 
be within 512 - 4096. Only one argument is acceptable, and it covers both IPv4 and IPv6. ## Examples +Enable limiting the buffer size of outgoing query to the resolver (172.31.0.10): ```corefile . { bufsize 512 @@ -24,7 +25,15 @@ Only one argument is acceptable, and it covers both IPv4 and IPv6. } ``` -If you run a resolver on 172.31.0.10, the buffer size of incoming query on the resolver will be set to 512 bytes. +Enable limiting the buffer size as an authoritative nameserver: +```corefile +. { + bufsize 512 + file db.example.org + log +} +``` ## Considerations -For now, if a client does not use EDNS, this plugin adds OPT RR. \ No newline at end of file +- Setting 1232 bytes to bufsize may avoid fragmentation on the majority of networks in use today, but it depends on the MTU of the physical network links. +- For now, if a client does not use EDNS, this plugin adds OPT RR. \ No newline at end of file From 672992751e37edaa6f05f7f11d1aa8037bc66907 Mon Sep 17 00:00:00 2001 From: Chris O'Haver Date: Thu, 14 Nov 2019 11:13:49 -0500 Subject: [PATCH 268/324] plugin/bufsize: remove trailing whitespace (#3444) --- plugin/bufsize/README.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/plugin/bufsize/README.md b/plugin/bufsize/README.md index 297941059e4..65c73ab6e56 100644 --- a/plugin/bufsize/README.md +++ b/plugin/bufsize/README.md @@ -3,7 +3,7 @@ *bufsize* - sizes EDNS0 buffer size to prevent IP fragmentation. ## Description -*bufsize* limits a requester's UDP payload size. +*bufsize* limits a requester's UDP payload size. It prevents IP fragmentation so that to deal with DNS vulnerability. ## Syntax @@ -11,8 +11,8 @@ It prevents IP fragmentation so that to deal with DNS vulnerability. bufsize [SIZE] ``` -**[SIZE]** is an int value for setting the buffer size. -The default value is 512, and the value must be within 512 - 4096. +**[SIZE]** is an int value for setting the buffer size. 
+The default value is 512, and the value must be within 512 - 4096. Only one argument is acceptable, and it covers both IPv4 and IPv6. ## Examples @@ -36,4 +36,4 @@ Enable limiting the buffer size as an authoritative nameserver: ## Considerations - Setting 1232 bytes to bufsize may avoid fragmentation on the majority of networks in use today, but it depends on the MTU of the physical network links. -- For now, if a client does not use EDNS, this plugin adds OPT RR. \ No newline at end of file +- For now, if a client does not use EDNS, this plugin adds OPT RR. From e1782c46e86908340179fff18def345f166c2826 Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Fri, 15 Nov 2019 14:11:21 +0000 Subject: [PATCH 269/324] build: go mod tidy (#3451) tidy up a bit; go build also made changs to go.mod, meaning this would have been (again) a dirty release. Need some better automation for this crap. Signed-off-by: Miek Gieben --- go.mod | 1 - go.sum | 34 ---------------------------------- 2 files changed, 35 deletions(-) diff --git a/go.mod b/go.mod index 6693b59da8e..bdcbbd5ad06 100644 --- a/go.mod +++ b/go.mod @@ -44,7 +44,6 @@ require ( golang.org/x/net v0.0.0-20191003171128-d98b1b443823 // indirect golang.org/x/sys v0.0.0-20191010194322-b09406accb47 google.golang.org/api v0.13.0 - google.golang.org/genproto v0.0.0-20190701230453-710ae3a149df // indirect google.golang.org/grpc v1.25.1 gopkg.in/DataDog/dd-trace-go.v1 v1.19.0 gopkg.in/inf.v0 v0.9.1 // indirect diff --git a/go.sum b/go.sum index 4b1bf4a0958..b41a68a1034 100644 --- a/go.sum +++ b/go.sum @@ -9,8 +9,6 @@ github.com/Azure/go-autorest v13.0.0+incompatible h1:56c11ykhsFSPNNQuS73Ri8h/ezq github.com/Azure/go-autorest v13.0.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest/autorest v0.9.0 h1:MRvx8gncNaXJqOoLmhNjUAKh33JJF8LyxPhomEtOsjs= github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= 
-github.com/Azure/go-autorest/autorest v0.9.1 h1:JB7Mqhna/7J8gZfVHjxDSTLSD6ciz2YgSMb/4qLXTtY= -github.com/Azure/go-autorest/autorest v0.9.1/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= github.com/Azure/go-autorest/autorest v0.9.2 h1:6AWuh3uWrsZJcNoCHrCF/+g4aKPCU39kaMO6/qrnK/4= github.com/Azure/go-autorest/autorest v0.9.2/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= @@ -18,8 +16,6 @@ github.com/Azure/go-autorest/autorest/adal v0.6.0 h1:UCTq22yE3RPgbU/8u4scfnnzuCW github.com/Azure/go-autorest/autorest/adal v0.6.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= github.com/Azure/go-autorest/autorest/adal v0.7.0 h1:PUMxSVw3tEImG0JTRqbxjXLKCSoPk7DartDELqlOuiI= github.com/Azure/go-autorest/autorest/adal v0.7.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= -github.com/Azure/go-autorest/autorest/azure/auth v0.3.0 h1:JwftqZDtWkr3qt1kcEgPd7H57uCHsXKXf66agWUQcGw= -github.com/Azure/go-autorest/autorest/azure/auth v0.3.0/go.mod h1:CI4BQYBct8NS7BXNBBX+RchsFsUu5+oz+OSyR/ZIi7U= github.com/Azure/go-autorest/autorest/azure/auth v0.4.0 h1:18ld/uw9Rr7VkNie7a7RMAcFIWrJdlUL59TWGfcu530= github.com/Azure/go-autorest/autorest/azure/auth v0.4.0/go.mod h1:Oo5cRhLvZteXzI2itUm5ziqsoIxRkzrt3t61FeZaS18= github.com/Azure/go-autorest/autorest/azure/cli v0.3.0 h1:5PAqnv+CSTwW9mlZWZAizmzrazFWEgZykEZXpr2hDtY= @@ -54,16 +50,6 @@ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRF github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/apache/thrift v0.12.0 h1:pODnxUFNcjP9UTLZGTdeh+j16A8lJbRvD3rOtrk/7bs= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= 
-github.com/aws/aws-sdk-go v1.25.1 h1:d7zDXFT2Tgq/yw7Wku49+lKisE8Xc85erb+8PlE/Shk= -github.com/aws/aws-sdk-go v1.25.1/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.25.11 h1:wUivbsVOH3LpHdC3Rl5i+FLHfg4sOmYgv4bvHe7+/Pg= -github.com/aws/aws-sdk-go v1.25.11/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.25.16 h1:k7Fy6T/uNuLX6zuayU/TJoP7yMgGcJSkZpF7QVjwYpA= -github.com/aws/aws-sdk-go v1.25.16/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.25.19 h1:sp3xP91qIAVhWufyn9qM6Zhhn6kX06WJQcmhRj7QTXc= -github.com/aws/aws-sdk-go v1.25.19/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.25.25 h1:j3HLOqcDWjNox1DyvJRs+kVQF42Ghtv6oL6cVBfXS3U= -github.com/aws/aws-sdk-go v1.25.25/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.25.31 h1:14mdh3HsTgRekePPkYcCbAaEXJknc3mN7f4XfsiMMDA= github.com/aws/aws-sdk-go v1.25.31/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= @@ -285,8 +271,6 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0 h1:vrDKnkGzuGvhNAL56c7DBz29ZL+KxnoR0x7enabFceM= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.1.0 h1:BQ53HtBmfOitExawJ6LokA4x8ov/z0SYYb0+HxJfRI8= -github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= github.com/prometheus/client_golang v1.2.1 h1:JnMpQc6ppsNgw9QPAGF6Dod479itz7lvlsMzzNayLOI= 
github.com/prometheus/client_golang v1.2.1/go.mod h1:XMU6Z2MjaRKVu/dC1qupJI9SiNkDYzz3xecMgSW/F+U= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= @@ -296,15 +280,11 @@ github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCb github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/common v0.4.1 h1:K0MGApIoQvMw27RTdJkPbr3JZ7DNbtxQNyi5STVM6Kw= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.6.0 h1:kRhiuYSXR3+uv2IbVbZhUxK5zVD/2pp3Gd2PpvPkpEo= -github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= github.com/prometheus/common v0.7.0 h1:L+1lyG48J1zAQXA3RBX/nG/B3gjlHq0zTt2tlbJLyCY= github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.3 h1:CTwfnzjQ+8dS6MhHHu4YswVAD99sL2wjPqP+VkURmKE= -github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= github.com/prometheus/procfs v0.0.5 h1:3+auTFlqw+ZaQYJARz6ArODtkaIwtvBTx3N2NehQlL8= github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhDYGoxY8uLVpewe1GDZ2vu2Tr/vTdVAkFQ= @@ -422,14 +402,10 @@ golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7w 
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0 h1:HyfiK1WMnHj5FXFXatD+Qs1A/xC2Run6RzeW1SyHxpc= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3 h1:4y9KwBHBgBNwDbtu44R5o1fdOCQUEXhbk/P4A9WmJq0= -golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe h1:6fAMxZRR6sl1Uq8U61gxU+kPTs2tR8uOySCbBP7BN/M= golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191003212358-c178f38b412c h1:6Zx7DRlKXf79yfxuQ/7GqV3w2y7aDsk6bGg0MzF5RVU= -golang.org/x/sys v0.0.0-20191003212358-c178f38b412c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191010194322-b09406accb47 h1:/XfQ9z7ib8eEJX2hdgFTZJ/ntt0swNk5oYBziWeTCvY= golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -460,10 +436,6 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+y golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.10.0 h1:7tmAxx3oKE98VMZ+SBZzvYYWRQ9HODBxmC8mXUsraSQ= -google.golang.org/api v0.10.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= 
-google.golang.org/api v0.11.0 h1:n/qM3q0/rV2F0pox7o0CvNhlPvZAo7pLbef122cbLJ0= -google.golang.org/api v0.11.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.13.0 h1:Q3Ui3V3/CVinFWFiW39Iw0kMuVrRzYX0wN6OPFp0lTA= google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= @@ -478,8 +450,6 @@ google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRn google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190626174449-989357319d63 h1:UsSJe9fhWNSz6emfIGPpH5DF23t7ALo2Pf3sC+/hsdg= google.golang.org/genproto v0.0.0-20190626174449-989357319d63/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= -google.golang.org/genproto v0.0.0-20190701230453-710ae3a149df h1:k3DT34vxk64+4bD5x+fRy6U0SXxZehzUHRSYUJcKfII= -google.golang.org/genproto v0.0.0-20190701230453-710ae3a149df/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= @@ -487,12 +457,8 @@ google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiq google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.24.0 h1:vb/1TCsVn3DcJlQ0Gs1yB1pKI6Do2/QNwxdKqmc/b0s= -google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= google.golang.org/grpc v1.25.1 
h1:wdKvqQk7IttEw92GoRyKG2IDrUIpgpj6H6m81yfeMW0= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -gopkg.in/DataDog/dd-trace-go.v1 v1.18.0 h1:n0pmVs3TxCRC9Jrd1/aNaB8roW5ZHMw0UOWPxAoUkkI= -gopkg.in/DataDog/dd-trace-go.v1 v1.18.0/go.mod h1:DVp8HmDh8PuTu2Z0fVVlBsyWaC++fzwVCaGWylTe3tg= gopkg.in/DataDog/dd-trace-go.v1 v1.19.0 h1:aFSFd6oDMdvPYiToGqTv7/ERA6QrPhGaXSuueRCaM88= gopkg.in/DataDog/dd-trace-go.v1 v1.19.0/go.mod h1:DVp8HmDh8PuTu2Z0fVVlBsyWaC++fzwVCaGWylTe3tg= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= From 1e3330c12b285de45888942d8dd69c6e4253098e Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Fri, 15 Nov 2019 14:14:29 +0000 Subject: [PATCH 270/324] pkg/fall: add (a lot of) guidance (#3450) update the pkg doc to talk about various trade off Fixes #2723 Signed-off-by: Miek Gieben --- plugin/pkg/fall/fall.go | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/plugin/pkg/fall/fall.go b/plugin/pkg/fall/fall.go index c8cdc6ff68c..067deb9a617 100644 --- a/plugin/pkg/fall/fall.go +++ b/plugin/pkg/fall/fall.go @@ -1,4 +1,16 @@ -// Package fall handles the fallthrough logic used in plugins that support it. +// Package fall handles the fallthrough logic used in plugins that support it. Be careful when including this +// functionality in your plugin. Why? In the DNS only 1 source is authoritative for a set of names. Fallthrough +// breaks this convention by allowing a plugin to query multiple sources, depending on the replies it got sofar. +// +// This may cause issues in downstream caches, where different answers for the same query can potentially confuse clients. +// On the other hand this is a powerful feature that can aid in migration or other edge cases. +// +// The take away: be mindful of this and don't blindly assume it's a good feature to have in your plugin. 
+// +// See http://github.com/coredns/coredns/issues/2723 for some discussion on this, which includes this quote: +// +// TL;DR: `fallthrough` is indeed risky and hackish, but still a good feature of CoreDNS as it allows to quickly answer boring edge cases. +// package fall import ( From 4831e7f9477dcdc7847b0a76fab09fcd17447bb2 Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Fri, 15 Nov 2019 15:45:09 +0000 Subject: [PATCH 271/324] bufsize: go gen and docs (#3449) Run 'go generate' and 'make -f Makefile.doc' to generate the chaos plugin author list and update the manual pages. Signed-off-by: Miek Gieben --- man/coredns-bufsize.7 | 67 +++++++++++++++++++++++++++++++++++++++++ man/coredns-reload.7 | 6 ++-- man/coredns-transfer.7 | 7 ++--- plugin/chaos/zowners.go | 2 +- 4 files changed, 74 insertions(+), 8 deletions(-) create mode 100644 man/coredns-bufsize.7 diff --git a/man/coredns-bufsize.7 b/man/coredns-bufsize.7 new file mode 100644 index 00000000000..e337ef61f4f --- /dev/null +++ b/man/coredns-bufsize.7 @@ -0,0 +1,67 @@ +.\" Generated by Mmark Markdown Processer - mmark.miek.nl +.TH "COREDNS-BUFSIZE" 7 "November 2019" "CoreDNS" "CoreDNS Plugins" + +.SH "NAME" +.PP +\fIbufsize\fP - sizes EDNS0 buffer size to prevent IP fragmentation. + +.SH "DESCRIPTION" +.PP +\fIbufsize\fP limits a requester's UDP payload size. +It prevents IP fragmentation so that to deal with DNS vulnerability. + +.SH "SYNTAX" +.PP +.RS + +.nf +bufsize [SIZE] + +.fi +.RE + +.PP +\fB[SIZE]\fP is an int value for setting the buffer size. +The default value is 512, and the value must be within 512 - 4096. +Only one argument is acceptable, and it covers both IPv4 and IPv6. + +.SH "EXAMPLES" +.PP +Enable limiting the buffer size of outgoing query to the resolver (172.31.0.10): + +.PP +.RS + +.nf +\&. { + bufsize 512 + forward . 172.31.0.10 + log +} + +.fi +.RE + +.PP +Enable limiting the buffer size as an authoritative nameserver: + +.PP +.RS + +.nf +\&. 
{ + bufsize 512 + file db.example.org + log +} + +.fi +.RE + +.SH "CONSIDERATIONS" +.IP \(bu 4 +Setting 1232 bytes to bufsize may avoid fragmentation on the majority of networks in use today, but it depends on the MTU of the physical network links. +.IP \(bu 4 +For now, if a client does not use EDNS, this plugin adds OPT RR. + + diff --git a/man/coredns-reload.7 b/man/coredns-reload.7 index a4fdefe8a51..97c81c9b9da 100644 --- a/man/coredns-reload.7 +++ b/man/coredns-reload.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-RELOAD" 7 "October 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-RELOAD" 7 "November 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP @@ -52,8 +52,8 @@ reload [INTERVAL] [JITTER] The plugin will check for changes every \fBINTERVAL\fP, subject to +/- the \fBJITTER\fP duration. .IP \(bu 4 -\fBINTERVAL\fP and \fBJITTER\fP are Golang (durations)[https://golang.org/pkg/time/#ParseDuration -\[la]https://golang.org/pkg/time/#ParseDuration\[ra]]. +\fBINTERVAL\fP and \fBJITTER\fP are Golang durations +\[la]https://golang.org/pkg/time/#ParseDuration\[ra]. The default \fBINTERVAL\fP is 30s, default \fBJITTER\fP is 15s, the minimal value for \fBINTERVAL\fP is 2s, and for \fBJITTER\fP it is 1s. If \fBJITTER\fP is more than half of \fBINTERVAL\fP, it will be set to half of \fBINTERVAL\fP diff --git a/man/coredns-transfer.7 b/man/coredns-transfer.7 index fd9e0e7ef87..96091556c7d 100644 --- a/man/coredns-transfer.7 +++ b/man/coredns-transfer.7 @@ -3,8 +3,7 @@ .SH "NAME" .PP -\fItransfer\fP - answer zone transfers requests for compatible authoritative -plugins. +\fItransfer\fP - perform zone transfers for other plugins. .SH "DESCRIPTION" .PP @@ -12,8 +11,8 @@ This plugin answers zone transfers for authoritative plugins that implement \fB\fCtransfer.Transferer\fR. .PP -Transfer answers AXFR requests and IXFR requests with AXFR fallback if the -zone has changed. 
+Transfer answers full zone transfer (AXFR) requests and incremental zone transfer (IXFR) requests +with AXFR fallback if the zone has changed. .PP Notifies are not currently supported. diff --git a/plugin/chaos/zowners.go b/plugin/chaos/zowners.go index c423fc0c4a9..d7ec383dbad 100644 --- a/plugin/chaos/zowners.go +++ b/plugin/chaos/zowners.go @@ -1,4 +1,4 @@ package chaos // Owners are all GitHub handlers of all maintainers. -var Owners = []string{"bradbeam", "chrisohaver", "darshanime", "dilyevsky", "ekleiner", "fastest963", "greenpau", "grobie", "ihac", "inigohu", "isolus", "johnbelamaric", "miekg", "nchrisdk", "nitisht", "pmoroney", "rajansandeep", "rdrozhdzh", "rtreffer", "stp-ip", "superq", "varyoo", "yongtang"} +var Owners = []string{"bradbeam", "chrisohaver", "darshanime", "dilyevsky", "ekleiner", "fastest963", "greenpau", "grobie", "ihac", "inigohu", "isolus", "johnbelamaric", "miekg", "nchrisdk", "nitisht", "pmoroney", "rajansandeep", "rdrozhdzh", "rtreffer", "stp-ip", "superq", "varyoo", "ykhr53", "yongtang"} From e14e053d3df3e607dc0e9fd7bb43d64eaf581162 Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Sun, 17 Nov 2019 02:02:46 +0000 Subject: [PATCH 272/324] create pkg/reuseport (#3455) * create pkg/reuseport Move the core server listening functions to a new package so plugins can use them. Also make *all* servers use the functions here; as only the udp/tcp listeners where using SO_REUSEPORT (if available). This is the only actual change in this PR; in it's core it's just a move of 2 files. This can also be used to cleanup the dance we're doing now for re-acquiring the sockets in e.g. the metrics plugins and the ready plugin. 
Signed-off-by: Miek Gieben * Also push a small doc update Signed-off-by: Miek Gieben --- core/dnsserver/listen_go111.go | 33 --------------------- core/dnsserver/listen_go_not111.go | 11 ------- core/dnsserver/server.go | 5 ++-- core/dnsserver/server_grpc.go | 3 +- core/dnsserver/server_https.go | 3 +- core/dnsserver/server_tls.go | 3 +- plugin.md | 5 ++++ plugin/pkg/reuseport/listen_go111.go | 37 ++++++++++++++++++++++++ plugin/pkg/reuseport/listen_go_not111.go | 13 +++++++++ 9 files changed, 64 insertions(+), 49 deletions(-) delete mode 100644 core/dnsserver/listen_go111.go delete mode 100644 core/dnsserver/listen_go_not111.go create mode 100644 plugin/pkg/reuseport/listen_go111.go create mode 100644 plugin/pkg/reuseport/listen_go_not111.go diff --git a/core/dnsserver/listen_go111.go b/core/dnsserver/listen_go111.go deleted file mode 100644 index 573988b3323..00000000000 --- a/core/dnsserver/listen_go111.go +++ /dev/null @@ -1,33 +0,0 @@ -// +build go1.11 -// +build aix darwin dragonfly freebsd linux netbsd openbsd - -package dnsserver - -import ( - "context" - "net" - "syscall" - - "github.com/coredns/coredns/plugin/pkg/log" - - "golang.org/x/sys/unix" -) - -func reuseportControl(network, address string, c syscall.RawConn) error { - c.Control(func(fd uintptr) { - if err := unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_REUSEPORT, 1); err != nil { - log.Warningf("Failed to set SO_REUSEPORT on socket: %s", err) - } - }) - return nil -} - -func listen(network, addr string) (net.Listener, error) { - lc := net.ListenConfig{Control: reuseportControl} - return lc.Listen(context.Background(), network, addr) -} - -func listenPacket(network, addr string) (net.PacketConn, error) { - lc := net.ListenConfig{Control: reuseportControl} - return lc.ListenPacket(context.Background(), network, addr) -} diff --git a/core/dnsserver/listen_go_not111.go b/core/dnsserver/listen_go_not111.go deleted file mode 100644 index 11021d09991..00000000000 --- 
a/core/dnsserver/listen_go_not111.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build !go1.11 !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd - -package dnsserver - -import "net" - -func listen(network, addr string) (net.Listener, error) { return net.Listen(network, addr) } - -func listenPacket(network, addr string) (net.PacketConn, error) { - return net.ListenPacket(network, addr) -} diff --git a/core/dnsserver/server.go b/core/dnsserver/server.go index 9a6fea1ced4..9b8fb23e4b2 100644 --- a/core/dnsserver/server.go +++ b/core/dnsserver/server.go @@ -15,6 +15,7 @@ import ( "github.com/coredns/coredns/plugin/pkg/edns" "github.com/coredns/coredns/plugin/pkg/log" "github.com/coredns/coredns/plugin/pkg/rcode" + "github.com/coredns/coredns/plugin/pkg/reuseport" "github.com/coredns/coredns/plugin/pkg/trace" "github.com/coredns/coredns/plugin/pkg/transport" "github.com/coredns/coredns/request" @@ -126,7 +127,7 @@ func (s *Server) ServePacket(p net.PacketConn) error { // Listen implements caddy.TCPServer interface. func (s *Server) Listen() (net.Listener, error) { - l, err := listen("tcp", s.Addr[len(transport.DNS+"://"):]) + l, err := reuseport.Listen("tcp", s.Addr[len(transport.DNS+"://"):]) if err != nil { return nil, err } @@ -140,7 +141,7 @@ func (s *Server) WrapListener(ln net.Listener) net.Listener { // ListenPacket implements caddy.UDPServer interface. 
func (s *Server) ListenPacket() (net.PacketConn, error) { - p, err := listenPacket("udp", s.Addr[len(transport.DNS+"://"):]) + p, err := reuseport.ListenPacket("udp", s.Addr[len(transport.DNS+"://"):]) if err != nil { return nil, err } diff --git a/core/dnsserver/server_grpc.go b/core/dnsserver/server_grpc.go index e4f48ddec1f..7b530f97a90 100644 --- a/core/dnsserver/server_grpc.go +++ b/core/dnsserver/server_grpc.go @@ -8,6 +8,7 @@ import ( "net" "github.com/coredns/coredns/pb" + "github.com/coredns/coredns/plugin/pkg/reuseport" "github.com/coredns/coredns/plugin/pkg/transport" "github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc" @@ -72,7 +73,7 @@ func (s *ServergRPC) ServePacket(p net.PacketConn) error { return nil } // Listen implements caddy.TCPServer interface. func (s *ServergRPC) Listen() (net.Listener, error) { - l, err := net.Listen("tcp", s.Addr[len(transport.GRPC+"://"):]) + l, err := reuseport.Listen("tcp", s.Addr[len(transport.GRPC+"://"):]) if err != nil { return nil, err } diff --git a/core/dnsserver/server_https.go b/core/dnsserver/server_https.go index 6b5a7238406..d2515295c69 100644 --- a/core/dnsserver/server_https.go +++ b/core/dnsserver/server_https.go @@ -12,6 +12,7 @@ import ( "github.com/coredns/coredns/plugin/pkg/dnsutil" "github.com/coredns/coredns/plugin/pkg/doh" "github.com/coredns/coredns/plugin/pkg/response" + "github.com/coredns/coredns/plugin/pkg/reuseport" "github.com/coredns/coredns/plugin/pkg/transport" ) @@ -61,7 +62,7 @@ func (s *ServerHTTPS) ServePacket(p net.PacketConn) error { return nil } // Listen implements caddy.TCPServer interface. 
func (s *ServerHTTPS) Listen() (net.Listener, error) { - l, err := net.Listen("tcp", s.Addr[len(transport.HTTPS+"://"):]) + l, err := reuseport.Listen("tcp", s.Addr[len(transport.HTTPS+"://"):]) if err != nil { return nil, err } diff --git a/core/dnsserver/server_tls.go b/core/dnsserver/server_tls.go index 1bc6e6ac25f..0b7fa517a2d 100644 --- a/core/dnsserver/server_tls.go +++ b/core/dnsserver/server_tls.go @@ -6,6 +6,7 @@ import ( "fmt" "net" + "github.com/coredns/coredns/plugin/pkg/reuseport" "github.com/coredns/coredns/plugin/pkg/transport" "github.com/miekg/dns" @@ -57,7 +58,7 @@ func (s *ServerTLS) ServePacket(p net.PacketConn) error { return nil } // Listen implements caddy.TCPServer interface. func (s *ServerTLS) Listen() (net.Listener, error) { - l, err := net.Listen("tcp", s.Addr[len(transport.TLS+"://"):]) + l, err := reuseport.Listen("tcp", s.Addr[len(transport.TLS+"://"):]) if err != nil { return nil, err } diff --git a/plugin.md b/plugin.md index f00cfe712ab..915ff8fc269 100644 --- a/plugin.md +++ b/plugin.md @@ -64,6 +64,11 @@ a *Metrics* section detailing the metrics. If the plugin supports signalling readiness it should have a *Ready* section detailing how it works, and implement the `ready.Readiness` interface. +## Opening Sockets + +See the plugin/pkg/reuseport for `Listen` and `ListenPacket` functions. Using these functions makes +you plugin handle reload events better. + ## Documentation Each plugin should have a README.md explaining what the plugin does and how it is configured. 
The diff --git a/plugin/pkg/reuseport/listen_go111.go b/plugin/pkg/reuseport/listen_go111.go new file mode 100644 index 00000000000..fa6f365d6e7 --- /dev/null +++ b/plugin/pkg/reuseport/listen_go111.go @@ -0,0 +1,37 @@ +// +build go1.11 +// +build aix darwin dragonfly freebsd linux netbsd openbsd + +package reuseport + +import ( + "context" + "net" + "syscall" + + "github.com/coredns/coredns/plugin/pkg/log" + + "golang.org/x/sys/unix" +) + +func control(network, address string, c syscall.RawConn) error { + c.Control(func(fd uintptr) { + if err := unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_REUSEPORT, 1); err != nil { + log.Warningf("Failed to set SO_REUSEPORT on socket: %s", err) + } + }) + return nil +} + +// Listen announces on the local network address. See net.Listen for more information. +// If SO_REUSEPORT is available it will be set on the socket. +func Listen(network, addr string) (net.Listener, error) { + lc := net.ListenConfig{Control: control} + return lc.Listen(context.Background(), network, addr) +} + +// ListenPacket announces on the local network address. See net.ListenPacket for more information. +// If SO_REUSEPORT is available it will be set on the socket. +func ListenPacket(network, addr string) (net.PacketConn, error) { + lc := net.ListenConfig{Control: control} + return lc.ListenPacket(context.Background(), network, addr) +} diff --git a/plugin/pkg/reuseport/listen_go_not111.go b/plugin/pkg/reuseport/listen_go_not111.go new file mode 100644 index 00000000000..e3bdfb90690 --- /dev/null +++ b/plugin/pkg/reuseport/listen_go_not111.go @@ -0,0 +1,13 @@ +// +build !go1.11 !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd + +package reuseport + +import "net" + +// Listen is a wrapper around net.Listen. +func Listen(network, addr string) (net.Listener, error) { return net.Listen(network, addr) } + +// ListenPacket is a wrapper around net.ListenPacket. 
+func ListenPacket(network, addr string) (net.PacketConn, error) { + return net.ListenPacket(network, addr) +} From f100d6118340edb68197af7a83e90fb232af70a8 Mon Sep 17 00:00:00 2001 From: Yong Tang Date: Sun, 17 Nov 2019 10:53:10 -0800 Subject: [PATCH 273/324] Fix incorrect sample configuration in clouddns (#3457) Automatically submitted. --- plugin/clouddns/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugin/clouddns/README.md b/plugin/clouddns/README.md index 18d95cb31a3..9ae6d52e086 100644 --- a/plugin/clouddns/README.md +++ b/plugin/clouddns/README.md @@ -59,7 +59,7 @@ Enable clouddns with fallthrough: ~~~ txt example.org { - clouddns example.org.:gcp-example-project:example-zone clouddns example.com.:gcp-example-project:example-zone-2 { + clouddns example.org.:gcp-example-project:example-zone example.com.:gcp-example-project:example-zone-2 { fallthrough example.gov. } } From f91c55d6cdf81f56707d0ee6b47f50d164555d68 Mon Sep 17 00:00:00 2001 From: Gonzalo Paniagua Javier Date: Sun, 17 Nov 2019 23:58:00 -0800 Subject: [PATCH 274/324] Fix reloading in plugin/pprof. (#3454) * Fix reloading in plugin/pprof. Reloading the server without changing the listen address results in an error because Startup is called for newly set up plugins before Shutdown is called for the old ones. Signed-off-by: Gonzalo Paniagua Javier * Use pkg/reuseport when listening. Use coredns' newly added reuseport. Signed-off-by: Gonzalo Paniagua Javier * Revert go.{mod,sum} changes. 
Signed-off-by: Gonzalo Paniagua Javier --- plugin/pprof/pprof.go | 7 ++++++- plugin/pprof/setup.go | 10 ++-------- 2 files changed, 8 insertions(+), 9 deletions(-) diff --git a/plugin/pprof/pprof.go b/plugin/pprof/pprof.go index c5301008918..ddf5ab2dfbf 100644 --- a/plugin/pprof/pprof.go +++ b/plugin/pprof/pprof.go @@ -7,6 +7,8 @@ import ( "net/http" pp "net/http/pprof" "runtime" + + "github.com/coredns/coredns/plugin/pkg/reuseport" ) type handler struct { @@ -17,7 +19,10 @@ type handler struct { } func (h *handler) Startup() error { - ln, err := net.Listen("tcp", h.addr) + // Reloading the plugin without changing the listening address results + // in an error unless we reuse the port because Startup is called for + // new handlers before Shutdown is called for the old ones. + ln, err := reuseport.Listen("tcp", h.addr) if err != nil { log.Errorf("Failed to start pprof handler: %s", err) return err diff --git a/plugin/pprof/setup.go b/plugin/pprof/setup.go index 8de3b2ac0bf..a9b93673cef 100644 --- a/plugin/pprof/setup.go +++ b/plugin/pprof/setup.go @@ -3,7 +3,6 @@ package pprof import ( "net" "strconv" - "sync" "github.com/coredns/coredns/plugin" clog "github.com/coredns/coredns/plugin/pkg/log" @@ -62,12 +61,7 @@ func setup(c *caddy.Controller) error { } - pprofOnce.Do(func() { - c.OnStartup(h.Startup) - c.OnShutdown(h.Shutdown) - }) - + c.OnStartup(h.Startup) + c.OnShutdown(h.Shutdown) return nil } - -var pprofOnce sync.Once From fe24096c57c044351a5d43af87a17072e097f7d7 Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 18 Nov 2019 12:25:43 +0100 Subject: [PATCH 275/324] build(deps): bump github.com/aws/aws-sdk-go from 1.25.31 to 1.25.36 (#3463) Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.25.31 to 1.25.36. 
- [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Changelog](https://github.com/aws/aws-sdk-go/blob/master/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.25.31...v1.25.36) Signed-off-by: dependabot-preview[bot] --- go.mod | 2 +- go.sum | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index bdcbbd5ad06..dfa9f6a9ece 100644 --- a/go.mod +++ b/go.mod @@ -11,7 +11,7 @@ require ( github.com/DataDog/datadog-go v2.2.0+incompatible // indirect github.com/Shopify/sarama v1.21.0 // indirect github.com/apache/thrift v0.12.0 // indirect - github.com/aws/aws-sdk-go v1.25.31 + github.com/aws/aws-sdk-go v1.25.36 github.com/caddyserver/caddy v1.0.3 github.com/coredns/federation v0.0.0-20190818181423-e032b096babe github.com/coreos/go-systemd v0.0.0-20190212144455-93d5ec2c7f76 // indirect diff --git a/go.sum b/go.sum index b41a68a1034..7a8e63350ab 100644 --- a/go.sum +++ b/go.sum @@ -52,6 +52,8 @@ github.com/apache/thrift v0.12.0 h1:pODnxUFNcjP9UTLZGTdeh+j16A8lJbRvD3rOtrk/7bs= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/aws/aws-sdk-go v1.25.31 h1:14mdh3HsTgRekePPkYcCbAaEXJknc3mN7f4XfsiMMDA= github.com/aws/aws-sdk-go v1.25.31/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.25.36 h1:4+TL/Y2G5hsR1zdfHmjNG1ou1WEqsSWk8v7m1GaDKyo= +github.com/aws/aws-sdk-go v1.25.36/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= From 352266e64bdc8b44db498b452777760c2ccf770c Mon Sep 17 00:00:00 2001 From: 
"dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 18 Nov 2019 12:28:13 +0000 Subject: [PATCH 276/324] build(deps): bump github.com/caddyserver/caddy from 1.0.3 to 1.0.4 (#3461) Bumps [github.com/caddyserver/caddy](https://github.com/caddyserver/caddy) from 1.0.3 to 1.0.4. - [Release notes](https://github.com/caddyserver/caddy/releases) - [Commits](https://github.com/caddyserver/caddy/compare/v1.0.3...v1.0.4) Signed-off-by: dependabot-preview[bot] --- go.mod | 2 +- go.sum | 132 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 133 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index dfa9f6a9ece..394f42e02e4 100644 --- a/go.mod +++ b/go.mod @@ -12,7 +12,7 @@ require ( github.com/Shopify/sarama v1.21.0 // indirect github.com/apache/thrift v0.12.0 // indirect github.com/aws/aws-sdk-go v1.25.36 - github.com/caddyserver/caddy v1.0.3 + github.com/caddyserver/caddy v1.0.4 github.com/coredns/federation v0.0.0-20190818181423-e032b096babe github.com/coreos/go-systemd v0.0.0-20190212144455-93d5ec2c7f76 // indirect github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f // indirect diff --git a/go.sum b/go.sum index 7a8e63350ab..80deb61ca59 100644 --- a/go.sum +++ b/go.sum @@ -3,21 +3,29 @@ cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.41.0 h1:NFvqUTDnSNYPX5oReekmB+D+90jrJIcVImxQ3qrBVgM= cloud.google.com/go v0.41.0/go.mod h1:OauMR7DV8fzvZIl2qg6rkaIhD/vmgk4iwEw/h6ercmg= +contrib.go.opencensus.io/exporter/ocagent v0.4.12/go.mod h1:450APlNTSR6FrvC3CTRqYosuDstRB9un7SOx2k/9ckA= github.com/Azure/azure-sdk-for-go v31.1.0+incompatible h1:5SzgnfAvUBdBwNTN23WLfZoCt/rGhLvd7QdCAaFXgY4= github.com/Azure/azure-sdk-for-go v31.1.0+incompatible/go.mod 
h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go v32.4.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/go-autorest v13.0.0+incompatible h1:56c11ykhsFSPNNQuS73Ri8h/ezqVhr2h6t9LJIEKVO0= github.com/Azure/go-autorest v13.0.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest/autorest v0.1.0/go.mod h1:AKyIcETwSUFxIcs/Wnq/C+kwCtlEYGUVd7FPNb2slmg= +github.com/Azure/go-autorest/autorest v0.5.0/go.mod h1:9HLKlQjVBH6U3oDfsXOeVc56THsLPw1L03yban4xThw= github.com/Azure/go-autorest/autorest v0.9.0 h1:MRvx8gncNaXJqOoLmhNjUAKh33JJF8LyxPhomEtOsjs= github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= github.com/Azure/go-autorest/autorest v0.9.2 h1:6AWuh3uWrsZJcNoCHrCF/+g4aKPCU39kaMO6/qrnK/4= github.com/Azure/go-autorest/autorest v0.9.2/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest/adal v0.1.0/go.mod h1:MeS4XhScH55IST095THyTxElntu7WqB7pNbZo8Q5G3E= +github.com/Azure/go-autorest/autorest/adal v0.2.0/go.mod h1:MeS4XhScH55IST095THyTxElntu7WqB7pNbZo8Q5G3E= github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= github.com/Azure/go-autorest/autorest/adal v0.6.0 h1:UCTq22yE3RPgbU/8u4scfnnzuCW6pwQ9n+uBtV78ouo= github.com/Azure/go-autorest/autorest/adal v0.6.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= github.com/Azure/go-autorest/autorest/adal v0.7.0 h1:PUMxSVw3tEImG0JTRqbxjXLKCSoPk7DartDELqlOuiI= github.com/Azure/go-autorest/autorest/adal v0.7.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= +github.com/Azure/go-autorest/autorest/azure/auth v0.1.0/go.mod h1:Gf7/i2FUpyb/sGBLIFxTBzrNzBo7aPXXE3ZVeDRwdpM= github.com/Azure/go-autorest/autorest/azure/auth v0.4.0 
h1:18ld/uw9Rr7VkNie7a7RMAcFIWrJdlUL59TWGfcu530= github.com/Azure/go-autorest/autorest/azure/auth v0.4.0/go.mod h1:Oo5cRhLvZteXzI2itUm5ziqsoIxRkzrt3t61FeZaS18= +github.com/Azure/go-autorest/autorest/azure/cli v0.1.0/go.mod h1:Dk8CUAt/b/PzkfeRsWzVG9Yj3ps8mS8ECztu43rdU8U= github.com/Azure/go-autorest/autorest/azure/cli v0.3.0 h1:5PAqnv+CSTwW9mlZWZAizmzrazFWEgZykEZXpr2hDtY= github.com/Azure/go-autorest/autorest/azure/cli v0.3.0/go.mod h1:rNYMNAefZMRowqCV0cVhr/YDW5dD7afFq9nXAXL4ykE= github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= @@ -29,8 +37,10 @@ github.com/Azure/go-autorest/autorest/mocks v0.3.0 h1:qJumjCaCudz+OcqE9/XtEPfvtO github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= github.com/Azure/go-autorest/autorest/to v0.2.0 h1:nQOZzFCudTh+TvquAtCRjM01VEYx85e9qbwt5ncW4L8= github.com/Azure/go-autorest/autorest/to v0.2.0/go.mod h1:GunWKJp1AEqgMaGLV+iocmRAJWqST1wQYhyyjXJ3SJc= +github.com/Azure/go-autorest/autorest/validation v0.1.0/go.mod h1:Ha3z/SqBeaalWQvokg3NZAlQTalVMtOIAs1aGK7G6u8= github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY= github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= +github.com/Azure/go-autorest/tracing v0.1.0/go.mod h1:ROEEAFwXycQw7Sn3DXNtEedEvdeRAgDr0izn4z5Ij88= github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k= github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= @@ -40,20 +50,28 @@ github.com/DataDog/datadog-go v2.2.0+incompatible h1:V5BKkxACZLjzHjSgBbr2gvLA2Ae github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod 
h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/DataDog/zstd v1.3.5 h1:DtpNbljikUepEPD16hD4LvIcmhnhdLTiW/5pHgbmp14= github.com/DataDog/zstd v1.3.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= +github.com/OpenDNS/vegadns2client v0.0.0-20180418235048-a3fa4a771d87/go.mod h1:iGLljf5n9GjT6kc0HBvyI1nOKnGQbNB66VzSNbK5iks= +github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= github.com/Shopify/sarama v1.21.0 h1:0GKs+e8mn1RRUzfg9oUXv3v7ZieQLmOZF/bfnmmGhM8= github.com/Shopify/sarama v1.21.0/go.mod h1:yuqtN/pe8cXRWG5zPaO7hCfNJp5MwmkoJEoLjkm5tCQ= github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= +github.com/akamai/AkamaiOPEN-edgegrid-golang v0.9.0/go.mod h1:zpDJeKyp9ScW4NNrbdr+Eyxvry3ilGPewKoXw3XGN1k= +github.com/alangpierce/go-forceexport v0.0.0-20160317203124-8f1d6941cd75/go.mod h1:uAXEEpARkRhCZfEvy/y0Jcc888f9tHCc1W7/UeEtreE= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190808125512-07798873deee/go.mod h1:myCDvQSzCW+wB1WAlocEru4wMGJxy+vlxHdhegi1CDQ= +github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20190307165228-86c17b95fcd5/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8= github.com/apache/thrift v0.12.0 h1:pODnxUFNcjP9UTLZGTdeh+j16A8lJbRvD3rOtrk/7bs= 
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/aws/aws-sdk-go v1.23.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.25.31 h1:14mdh3HsTgRekePPkYcCbAaEXJknc3mN7f4XfsiMMDA= github.com/aws/aws-sdk-go v1.25.31/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.25.36 h1:4+TL/Y2G5hsR1zdfHmjNG1ou1WEqsSWk8v7m1GaDKyo= github.com/aws/aws-sdk-go v1.25.36/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -62,13 +80,19 @@ github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kB github.com/bifurcation/mint v0.0.0-20180715133206-93c51c6ce115/go.mod h1:zVt7zX3K/aDCk9Tj+VM7YymsX66ERvzCJzw8rFCX2JU= github.com/caddyserver/caddy v1.0.3 h1:i9gRhBgvc5ifchwWtSe7pDpsdS9+Q0Rw9oYQmYUTw1w= github.com/caddyserver/caddy v1.0.3/go.mod h1:G+ouvOY32gENkJC+jhgl62TyhvqEsFaDiZ4uw0RzP1E= +github.com/caddyserver/caddy v1.0.4 h1:wwuGSkUHo6RZ3oMpeTt7J09WBB87X5o+IZN4dKehcQE= +github.com/caddyserver/caddy v1.0.4/go.mod h1:uruyfVsyMcDb3IOzSKsi1x0wOjy1my/PxOSTcD+24jM= github.com/cenkalti/backoff v2.1.1+incompatible h1:tKJnvO2kl0zmb/jA5UKAt4VoEVw1qxKWjE/Bpp46npY= github.com/cenkalti/backoff v2.1.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= 
+github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.1.0 h1:yTUvW7Vhb89inJ+8irsUqiWjh8iT6sQPZiQzI6ReGkA= github.com/cespare/xxhash/v2 v2.1.0/go.mod h1:dgIUBU3pDso/gPgZ1osOZ0iQf77oPR28Tjxl5dIMyVM= github.com/cheekybits/genny v0.0.0-20170328200008-9127e812e1e9/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ= +github.com/cheekybits/genny v1.0.0/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cloudflare/cloudflare-go v0.10.2/go.mod h1:qhVI5MKwBGhdNU89ZRz2plgYutcJ5PCekLxXn56w6SY= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa h1:OaNxuTZr7kxeODyLWsRMC+OD03aFUH+mW6r2d+MWa5Y= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/coredns/federation v0.0.0-20190818181423-e032b096babe h1:ND08lR/TclI9W4dScCwdRESOacCCdF3FkuB5pBIOv1U= @@ -81,15 +105,20 @@ github.com/coreos/go-systemd v0.0.0-20190212144455-93d5ec2c7f76/go.mod h1:F5haX7 github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpu/goacmedns v0.0.1/go.mod h1:sesf/pNnCYwUevQEQfEwY0Y3DydlQWSGZbaMElOWxok= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.7/go.mod 
h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/decker502/dnspod-go v0.2.0/go.mod h1:qsurYu1FgxcDwfSwXJdLt4kRsBLZeosEb9uq4Sy+08g= github.com/dgrijalva/jwt-go v0.0.0-20160705203006-01aeca54ebda/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dimchansky/utfbom v1.1.0 h1:FcM3g+nofKgUteL8dm/UpdRXNC9KmADgTpLKsu0TRo4= github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= +github.com/dnaeon/go-vcr v0.0.0-20180814043457-aafff18a5cc2/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= +github.com/dnsimple/dnsimple-go v0.30.0/go.mod h1:O5TJ0/U6r7AfT8niYNlmohpLbCSG+c71tQlGr9SeGrg= github.com/dnstap/golang-dnstap v0.0.0-20170829151710-2cf77a2b5e11 h1:m8nX8hsUghn853BJ5qB0lX+VvS6LTJPksWyILFZRYN4= github.com/dnstap/golang-dnstap v0.0.0-20170829151710-2cf77a2b5e11/go.mod h1:s1PfVYYVmTMgCSPtho4LKBDecEHJWtiVDPNv78Z985U= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= @@ -108,9 +137,11 @@ github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7 github.com/evanphx/json-patch v0.0.0-20190203023257-5858425f7550/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.1.0+incompatible h1:K1MDoo4AZ4wU0GIU/fPmtZg7VpzLjCxu+UwBD1FvwOc= github.com/evanphx/json-patch v4.1.0+incompatible/go.mod 
h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/exoscale/egoscale v0.18.1/go.mod h1:Z7OOdzzTOz1Q1PjQXumlz9Wn/CddH0zSYdCF3rnBKXE= github.com/farsightsec/golang-framestream v0.0.0-20181102145529-8a0cb8ba8710 h1:QdyRyGZWLEvJG5Kw3VcVJvhXJ5tZ1MkRgqpJOEZSySM= github.com/farsightsec/golang-framestream v0.0.0-20181102145529-8a0cb8ba8710/go.mod h1:eNde4IQyEiA5br02AouhEHCu3p3UzrCdFR4LuQHklMI= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 h1:BHsljHzVlRcyQhjrss6TZTdY2VfCqZPbv5k3iBFa2ZQ= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= @@ -118,6 +149,11 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-acme/lego v2.5.0+incompatible h1:5fNN9yRQfv8ymH3DSsxla+4aYeQt2IgfZqHKVnK8f0s= github.com/go-acme/lego v2.5.0+incompatible/go.mod h1:yzMNe9CasVUhkquNvti5nAtPmG94USbYxYrZfTkIn0M= +github.com/go-acme/lego/v3 v3.1.0/go.mod h1:074uqt+JS6plx+c9Xaiz6+L+GBb+7itGtzfcDM2AhEE= +github.com/go-acme/lego/v3 v3.2.0/go.mod h1:074uqt+JS6plx+c9Xaiz6+L+GBb+7itGtzfcDM2AhEE= +github.com/go-cmd/cmd v1.0.5/go.mod h1:y8q8qlK5wQibcw63djSl/ntiHUHXHGdCkPk0j4QeW4s= +github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= +github.com/go-ini/ini v1.44.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod 
h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= @@ -125,10 +161,13 @@ github.com/go-logfmt/logfmt v0.4.0 h1:MP4Eh7ZCb31lleYCFuwm0oe4/YGak+5l1vA2NOE80n github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/protobuf v0.0.0-20171007142547-342cbe0a0415/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/goji/httpauth v0.0.0-20160601135302-2da839ab0f4d/go.mod h1:nnjvkQ9ptGaCkuDUx6wNykzzlUixGxvkme+H/lnzb+A= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -138,6 +177,7 @@ github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/protobuf v1.2.0/go.mod 
h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.0/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -150,6 +190,7 @@ github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -168,6 +209,11 @@ github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTV github.com/gophercloud/gophercloud v0.0.0-20190126172459-c818fa66e4c8/go.mod h1:3WdhXV3rUYy9p6AUW8d94kr+HS62Y4VL9mBnFxsD8q4= github.com/gophercloud/gophercloud v0.0.0-20190307220656-fe1ba5ce12dd h1:tkA3C/XTk8iACLOlTez37pL+0iGSYkkRGKdXgJ6ZylM= github.com/gophercloud/gophercloud v0.0.0-20190307220656-fe1ba5ce12dd/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= +github.com/gophercloud/gophercloud v0.3.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/context 
v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= +github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= @@ -176,17 +222,21 @@ github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.5 h1:UImYN5qQ8tuGpGE16ZmjvcTtTw24zw1QAp/SlnNrZhI= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 h1:MJG/KsmcqMwFAkh8mTnAwhyKoB+sTAnY4CACC110tbU= github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= +github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542/go.mod h1:Ow0tF8D4Kplbc8s8sSb3V2oUCygFHVp8gC3Dn6U4MNI= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= 
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/iij/doapi v0.0.0-20190504054126-0bbf12d6d7df/go.mod h1:QMZY7/J/KSQEhKWFeDesPjMj+wCHReeknARU3wqlyN4= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.7 h1:Y+UAYTZ7gDEuOfhxKWy+dvb5dRQ6rJjFSdX2HZY1/gI= github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= @@ -200,16 +250,19 @@ github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/json-iterator/go v0.0.0-20180701071628-ab8a2e0c74be/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7 h1:KfgG9LzI+pYjr4xvmz/5H4FXjokeP+rlHLhv3iH62Fo= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod 
h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/cpuid v1.2.0 h1:NMpwD2G9JSFOE1/TJjGSo5zG7Yb2bTe7eq1jH+irmeE= github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= +github.com/kolo/xmlrpc v0.0.0-20190717152603-07c4ee3fd181/go.mod h1:o03bZfuBwAXHetKXuInt4S7omeXUu62/A845kiycsSQ= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -221,22 +274,37 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= +github.com/labbsr0x/bindman-dns-webhook v1.0.2/go.mod h1:p6b+VCXIR8NYKpDr8/dg1HKfQoRHCdcsROXKvmoehKA= +github.com/labbsr0x/goh v1.0.1/go.mod h1:8K2UhVoaWXcCU7Lxoa2omWnC8gyW8px7/lmO61c027w= +github.com/linode/linodego v0.10.0/go.mod h1:cziNP7pbvE3mXIPneHj0oRY8L1WtGEIKlZ8LANE4eXA= +github.com/liquidweb/liquidweb-go v1.6.0/go.mod h1:UDcVnAMDkZxpw4Y7NOHkqoeiGacVLEIG/i5J9cyixzQ= github.com/lucas-clemente/aes12 
v0.0.0-20171027163421-cd47fb39b79f/go.mod h1:JpH9J1c9oX6otFSgdUHwUBUizmKlrMjxWnIAjff4m04= github.com/lucas-clemente/quic-clients v0.1.0/go.mod h1:y5xVIEoObKqULIKivu+gD/LU90pL73bTdtQjPBvtCBk= github.com/lucas-clemente/quic-go v0.10.2/go.mod h1:hvaRS9IHjFLMq76puFJeWNfmn+H70QZ/CXoxqw9bzao= +github.com/lucas-clemente/quic-go v0.13.1/go.mod h1:Vn3/Fb0/77b02SGhQk36KzOUmXgVpFfizUfW5WMaqyU= github.com/lucas-clemente/quic-go-certificates v0.0.0-20160823095156-d2f86524cced/go.mod h1:NCcRLrOTZbzhZvixZLlERbJtDtYsmMw8Jc4vS8Z0g58= +github.com/marten-seemann/chacha20 v0.2.0/go.mod h1:HSdjFau7GzYRj+ahFNwsO3ouVJr1HFkWoEwNDb4TMtE= +github.com/marten-seemann/qpack v0.1.0/go.mod h1:LFt1NU/Ptjip0C2CPkhimBz5CGE3WGDAUWqna+CNTrI= github.com/marten-seemann/qtls v0.2.3/go.mod h1:xzjG7avBwGGbdZ8dTGxlBnLArsVKLvwmjgmPuiQEcYk= +github.com/marten-seemann/qtls v0.4.1/go.mod h1:pxVXcHHw1pNIt8Qo0pwSYQEoZ8yYOOPXTCZLQQunvRc= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-tty v0.0.0-20180219170247-931426f7535a/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mholt/certmagic v0.6.2-0.20190624175158-6a42ef9fe8c2 h1:xKE9kZ5C8gelJC3+BNM6LJs1x21rivK7yxfTZMAuY2s= github.com/mholt/certmagic 
v0.6.2-0.20190624175158-6a42ef9fe8c2/go.mod h1:g4cOPxcjV0oFq3qwpjSA30LReKD8AoIfwAY9VvG35NY= +github.com/mholt/certmagic v0.8.3/go.mod h1:91uJzK5K8IWtYQqTi5R2tsxV1pCde+wdGfaRaOZi6aQ= +github.com/miekg/dns v1.1.15/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.22 h1:Jm64b3bO9kP43ddLjL2EY3Io6bmy1qGb9Xxz6TqS6rc= github.com/miekg/dns v1.1.22/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-vnc v0.0.0-20150629162542-723ed9867aed/go.mod h1:3rdaFaCv4AyBgu5ALFM0+tSuHrBh6v692nyQe3ikrq0= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -245,13 +313,21 @@ github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9 github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/namedotcom/go v0.0.0-20180403034216-08470befbe04/go.mod h1:5sN+Lt1CaY4wsPvgQH/jsuJi4XO2ssZbdsIizr4CVC8= github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0= github.com/naoina/toml 
v0.1.1/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E= +github.com/nbio/st v0.0.0-20140626010706-e9e8d9816f32/go.mod h1:9wM+0iRr9ahx58uYLpLIr5fm8diHn0JbqRycJi6w0Ms= +github.com/nrdcg/auroradns v1.0.0/go.mod h1:6JPXKzIRzZzMqtTDgueIhTi6rFf1QvYE/HzqidhOhjw= +github.com/nrdcg/goinwx v0.6.1/go.mod h1:XPiut7enlbEdntAqalBIqcYcTEVhpv/dKWgDCX2SwKQ= +github.com/nrdcg/namesilo v0.2.1/go.mod h1:lwMvfQTyYq+BbjJd30ylEG4GPSS6PII0Tia4rRpRiyw= github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.8.0 h1:VkHVNpR4iVnU8XQR6DBm8BqYjN7CRzw+xKUbVVbbW9w= github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega v0.0.0-20190113212917-5533ce8a0da3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492 h1:lM6RxxfUMrYL/f8bWEUqdXrANWtrL7Nndbm9iFN0DlU= @@ -260,6 +336,9 @@ github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsq github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/openzipkin-contrib/zipkin-go-opentracing v0.3.5 h1:82Tnq9OJpn+h5xgGpss5/mOv3KXdjtkdorFSOUusjM8= github.com/openzipkin-contrib/zipkin-go-opentracing 
v0.3.5/go.mod h1:uVHyebswE1cCXr2A73cRM2frx5ld1RJUCJkFNZ90ZiI= +github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= +github.com/oracle/oci-go-sdk v7.0.0+incompatible/go.mod h1:VQb79nF8Z2cwLkLS35ukwStZIg5F66tcBccjip/j888= +github.com/ovh/go-ovh v0.0.0-20181109152953-ba5adb4cf014/go.mod h1:joRatxRJaZBsY3JAOEMcoOp05CnZzsx4scTxi95DHyQ= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/philhofer/fwd v1.0.0 h1:UbZqGr5Y38ApvM/V/jEljVxwocdweyH+vmYvRPBnbqQ= github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= @@ -271,31 +350,46 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= github.com/prometheus/client_golang v1.0.0 h1:vrDKnkGzuGvhNAL56c7DBz29ZL+KxnoR0x7enabFceM= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= github.com/prometheus/client_golang v1.2.1 h1:JnMpQc6ppsNgw9QPAGF6Dod479itz7lvlsMzzNayLOI= github.com/prometheus/client_golang v1.2.1/go.mod h1:XMU6Z2MjaRKVu/dC1qupJI9SiNkDYzz3xecMgSW/F+U= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod 
h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1 h1:K0MGApIoQvMw27RTdJkPbr3JZ7DNbtxQNyi5STVM6Kw= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= github.com/prometheus/common v0.7.0 h1:L+1lyG48J1zAQXA3RBX/nG/B3gjlHq0zTt2tlbJLyCY= github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= github.com/prometheus/procfs v0.0.5 h1:3+auTFlqw+ZaQYJARz6ArODtkaIwtvBTx3N2NehQlL8= github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= +github.com/rainycape/memcache v0.0.0-20150622160815-1031fa0ce2f2/go.mod 
h1:7tZKcyumwBO6qip7RNQ5r77yrssm9bfCowcLEBcU5IA= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhDYGoxY8uLVpewe1GDZ2vu2Tr/vTdVAkFQ= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/russross/blackfriday v0.0.0-20170610170232-067529f716f4/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/sacloud/libsacloud v1.26.1/go.mod h1:79ZwATmHLIFZIMd7sxA3LwzVy/B77uj3LDoToVTxDoQ= +github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/skratchdot/open-golang v0.0.0-20160302144031-75fb7ed4208c/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= @@ -309,18 +403,29 @@ github.com/stretchr/objx 
v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/timewasted/linode v0.0.0-20160829202747-37e84520dcf7/go.mod h1:imsgLplxEC/etjIhdr3dNzV3JeT27LbVu5pYWm0JCBY= github.com/tinylib/msgp v1.1.0 h1:9fQd+ICuRIu/ue4vxJZu6/LzxN0HwMds2nq/0cFvxHU= github.com/tinylib/msgp v1.1.0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 h1:LnC5Kc/wtumK+WB441p7ynQJzVuNRJiqddSIE3IlSEQ= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/transip/gotransip v0.0.0-20190812104329-6d8d9179b66f/go.mod h1:i0f4R4o2HM0m3DZYQWsj6/MEowD57VzoH0v3d7igeFY= +github.com/uber-go/atomic v1.3.2/go.mod h1:/Ct5t2lcmbJ4OSe/waGBoaVvVqtO0bmtfVNex1PFV8g= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/vultr/govultr v0.1.4/go.mod h1:9H008Uxr/C4vFNGLqKx232C206GL0PBHzOP0809bGNA= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v1.1.0/go.mod 
h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/etcd v0.5.0-alpha.5.0.20190917205325-a14579fbfb1a h1:kw8SHTZWndtiiC6ht2gBebCOGycQHLGERawMZljmbAY= go.etcd.io/etcd v0.5.0-alpha.5.0.20190917205325-a14579fbfb1a/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= +go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= @@ -328,8 +433,10 @@ go.uber.org/atomic v1.3.2 h1:2Oa65PReHzfn29GpvgsYwloV9AVFHPDk8tYxt2c2tr4= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/ratelimit v0.0.0-20180316092928-c15da0234277/go.mod h1:2X8KaoNd1J0lZV+PxJk/5+DGbO/tpwLR1m++a7FnB/Y= go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +golang.org/x/crypto v0.0.0-20180621125126-a49355c7e3f8/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181025213731-e84da0312774/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= 
golang.org/x/crypto v0.0.0-20190123085648-057139ce5d2b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -338,6 +445,9 @@ golang.org/x/crypto v0.0.0-20190228161510-8dd112bcdc25/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190418165655-df01cb2cc480/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190829043050-9756ffdc2472/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392 h1:ACG4HJsFiNMf47Y4PeRoebLNy/2lXT9EtprMuTFWt1M= golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191002192127-34f69633bfdc h1:c0o/qxkaO2LF5t6fQrT4b5hzyggAkLLlCUjqfRxd8Q4= @@ -352,6 +462,7 @@ golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20190409202823-959b441ac422 h1:QzoH/1pFpZguR8NrRHLcO6jKqfv2zpuSqZLgdm7ZmjI= golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/net v0.0.0-20180611182652-db08ff08e862/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -361,6 +472,7 @@ golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190206173232-65e2d4e15006/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190228165749-92fc7df08ae7/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190328230028-74de082e2cca/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -373,6 +485,7 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190923162816-aa69164e4478 h1:l5EDrHhldLYb3ZRHDUhXF7Om7MvYXnkV9/iQNo1lX6g= golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190930134127-c5a3c61f89f3/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191003171128-d98b1b443823 h1:Ypyv6BNJh07T1pUSrehkLemqPKXhus2MkfktJ91kRh4= golang.org/x/net v0.0.0-20191003171128-d98b1b443823/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -386,11 +499,13 @@ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync 
v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180622082034-63fc586f45fe/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190124100055-b90733256f2e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -404,7 +519,9 @@ golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0 h1:HyfiK1WMnHj5FXFXatD+Qs1A/xC2Run6RzeW1SyHxpc= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe h1:6fAMxZRR6sl1Uq8U61gxU+kPTs2tR8uOySCbBP7BN/M= golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -420,13 +537,16 @@ golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190921001708-c4c64cad1fd0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= 
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= @@ -436,8 +556,10 @@ golang.org/x/tools v0.0.0-20190907020128-2ca718005c18 h1:xFbv3LvlvQAmbNJFCBKRv1C golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.13.0 h1:Q3Ui3V3/CVinFWFiW39Iw0kMuVrRzYX0wN6OPFp0lTA= google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= @@ -446,6 +568,7 @@ google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/appengine v1.6.1 h1:QzqyMA1tlu6CgqCDUtU9V+ZKhLFT2dkJuANu5QaxI3I= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= 
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -454,7 +577,9 @@ google.golang.org/genproto v0.0.0-20190626174449-989357319d63 h1:UsSJe9fhWNSz6em google.golang.org/genproto v0.0.0-20190626174449-989357319d63/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.19.1/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= @@ -470,20 +595,27 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/h2non/gock.v1 v1.0.15/go.mod h1:sX4zAkdYX1TRGJ2JY156cFspQn4yRWn6p9EMdODlynE= gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.44.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/mcuadros/go-syslog.v2 v2.2.1/go.mod h1:l5LPIyOOyIdQquNg+oU6Z3524YwrcqEm0aKH+5zpt2U= 
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +gopkg.in/ns1/ns1-go.v2 v2.0.0-20190730140822-b51389932cbc/go.mod h1:VV+3haRsgDiVLxyifmMBrBIuCWFBPYKbRssXB9z67Hw= +gopkg.in/resty.v1 v1.9.1/go.mod h1:vo52Hzryw9PnPHcJfPsBiFW62XhNx5OczbV9y+IMpgc= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/square/go-jose.v2 v2.2.2 h1:orlkJ3myw8CN1nVQHBFfloD+L3egixIa4FvUP6RosSA= gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= From 54bd9bb0ac882f587eebdecdacb162f6c44b8464 Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 18 Nov 2019 06:57:23 -0800 Subject: [PATCH 277/324] build(deps): bump google.golang.org/api from 0.13.0 to 0.14.0 (#3462) Bumps [google.golang.org/api](https://github.com/google/google-api-go-client) from 
0.13.0 to 0.14.0. - [Release notes](https://github.com/google/google-api-go-client/releases) - [Changelog](https://github.com/googleapis/google-api-go-client/blob/master/CHANGES.md) - [Commits](https://github.com/google/google-api-go-client/compare/v0.13.0...v0.14.0) Signed-off-by: dependabot-preview[bot] --- go.mod | 2 +- go.sum | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index 394f42e02e4..bb26272cfc7 100644 --- a/go.mod +++ b/go.mod @@ -43,7 +43,7 @@ require ( golang.org/x/crypto v0.0.0-20191002192127-34f69633bfdc golang.org/x/net v0.0.0-20191003171128-d98b1b443823 // indirect golang.org/x/sys v0.0.0-20191010194322-b09406accb47 - google.golang.org/api v0.13.0 + google.golang.org/api v0.14.0 google.golang.org/grpc v1.25.1 gopkg.in/DataDog/dd-trace-go.v1 v1.19.0 gopkg.in/inf.v0 v0.9.1 // indirect diff --git a/go.sum b/go.sum index 80deb61ca59..5eb2255aa33 100644 --- a/go.sum +++ b/go.sum @@ -562,6 +562,8 @@ google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.13.0 h1:Q3Ui3V3/CVinFWFiW39Iw0kMuVrRzYX0wN6OPFp0lTA= google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0 h1:uMf5uLi4eQMRrMKhCplNik4U4H8Z6C1br3zOtAa/aDE= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= From 65cae54ddfc5256b4133d0ff9f1e9fbe99579d77 Mon Sep 17 00:00:00 2001 From: Chris O'Haver Date: Mon, 18 Nov 2019 11:10:25 -0500 Subject: [PATCH 278/324] Update plugin.md (#3469) --- plugin.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff 
--git a/plugin.md b/plugin.md index 915ff8fc269..b85a4c28a9e 100644 --- a/plugin.md +++ b/plugin.md @@ -67,7 +67,7 @@ works, and implement the `ready.Readiness` interface. ## Opening Sockets See the plugin/pkg/reuseport for `Listen` and `ListenPacket` functions. Using these functions makes -you plugin handle reload events better. +your plugin handle reload events better. ## Documentation From 768ca99c571ce042a1899defb553db0123d84801 Mon Sep 17 00:00:00 2001 From: Zou Nengren Date: Wed, 20 Nov 2019 20:14:37 +0800 Subject: [PATCH 279/324] Fix reloading in health and ready (#3473) Signed-off-by: zouyee --- plugin/health/health.go | 4 ++-- plugin/ready/ready.go | 3 ++- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/plugin/health/health.go b/plugin/health/health.go index 0db68eb7251..b5b4b95a268 100644 --- a/plugin/health/health.go +++ b/plugin/health/health.go @@ -8,6 +8,7 @@ import ( "time" clog "github.com/coredns/coredns/plugin/pkg/log" + "github.com/coredns/coredns/plugin/pkg/reuseport" ) var log = clog.NewWithPlugin("health") @@ -29,8 +30,7 @@ func (h *health) OnStartup() error { h.Addr = ":8080" } h.stop = make(chan bool) - - ln, err := net.Listen("tcp", h.Addr) + ln, err := reuseport.Listen("tcp", h.Addr) if err != nil { return err } diff --git a/plugin/ready/ready.go b/plugin/ready/ready.go index f40682041df..326d399551d 100644 --- a/plugin/ready/ready.go +++ b/plugin/ready/ready.go @@ -12,6 +12,7 @@ import ( clog "github.com/coredns/coredns/plugin/pkg/log" "github.com/coredns/coredns/plugin/pkg/uniq" + "github.com/coredns/coredns/plugin/pkg/reuseport" ) var ( @@ -30,7 +31,7 @@ type ready struct { } func (rd *ready) onStartup() error { - ln, err := net.Listen("tcp", rd.Addr) + ln, err := reuseport.Listen("tcp", rd.Addr) if err != nil { return err } From d18f4c9e516d7cd8d41d5130b1ef9467408f8999 Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Sun, 24 Nov 2019 18:42:59 +0000 Subject: [PATCH 280/324] 
mechanical changes (#3481) * make -f Makefile.doc * go mod tidy * go generate trying to prevent another dirty build/release. Signed-off-by: Miek Gieben --- go.mod | 6 +----- go.sum | 42 +++++++----------------------------------- man/coredns-clouddns.7 | 4 ++-- man/coredns-sign.7 | 2 +- 4 files changed, 11 insertions(+), 43 deletions(-) diff --git a/go.mod b/go.mod index bb26272cfc7..d1ed096696a 100644 --- a/go.mod +++ b/go.mod @@ -4,13 +4,11 @@ go 1.12 require ( cloud.google.com/go v0.41.0 // indirect - github.com/Azure/azure-sdk-for-go v31.1.0+incompatible + github.com/Azure/azure-sdk-for-go v32.4.0+incompatible github.com/Azure/go-autorest/autorest v0.9.2 github.com/Azure/go-autorest/autorest/azure/auth v0.4.0 - github.com/Azure/go-autorest/autorest/to v0.2.0 // indirect github.com/DataDog/datadog-go v2.2.0+incompatible // indirect github.com/Shopify/sarama v1.21.0 // indirect - github.com/apache/thrift v0.12.0 // indirect github.com/aws/aws-sdk-go v1.25.36 github.com/caddyserver/caddy v1.0.4 github.com/coredns/federation v0.0.0-20190818181423-e032b096babe @@ -22,11 +20,9 @@ require ( github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef // indirect github.com/golang/protobuf v1.3.2 github.com/googleapis/gnostic v0.2.0 // indirect - github.com/gophercloud/gophercloud v0.0.0-20190307220656-fe1ba5ce12dd // indirect github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 github.com/imdario/mergo v0.3.7 // indirect github.com/infobloxopen/go-trees v0.0.0-20190313150506-2af4e13f9062 - github.com/konsorten/go-windows-terminal-sequences v1.0.2 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.1 github.com/miekg/dns v1.1.22 github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492 // indirect diff --git a/go.sum b/go.sum index 
5eb2255aa33..12d439fb538 100644 --- a/go.sum +++ b/go.sum @@ -4,8 +4,7 @@ cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSR cloud.google.com/go v0.41.0 h1:NFvqUTDnSNYPX5oReekmB+D+90jrJIcVImxQ3qrBVgM= cloud.google.com/go v0.41.0/go.mod h1:OauMR7DV8fzvZIl2qg6rkaIhD/vmgk4iwEw/h6ercmg= contrib.go.opencensus.io/exporter/ocagent v0.4.12/go.mod h1:450APlNTSR6FrvC3CTRqYosuDstRB9un7SOx2k/9ckA= -github.com/Azure/azure-sdk-for-go v31.1.0+incompatible h1:5SzgnfAvUBdBwNTN23WLfZoCt/rGhLvd7QdCAaFXgY4= -github.com/Azure/azure-sdk-for-go v31.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go v32.4.0+incompatible h1:1JP8SKfroEakYiQU2ZyPDosh8w2Tg9UopKt88VyQPt4= github.com/Azure/azure-sdk-for-go v32.4.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/go-autorest v13.0.0+incompatible h1:56c11ykhsFSPNNQuS73Ri8h/ezqVhr2h6t9LJIEKVO0= github.com/Azure/go-autorest v13.0.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= @@ -67,8 +66,6 @@ github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20190307165228-86c17b95fcd5/go.mod h1 github.com/apache/thrift v0.12.0 h1:pODnxUFNcjP9UTLZGTdeh+j16A8lJbRvD3rOtrk/7bs= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/aws/aws-sdk-go v1.23.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.25.31 h1:14mdh3HsTgRekePPkYcCbAaEXJknc3mN7f4XfsiMMDA= -github.com/aws/aws-sdk-go v1.25.31/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.25.36 h1:4+TL/Y2G5hsR1zdfHmjNG1ou1WEqsSWk8v7m1GaDKyo= github.com/aws/aws-sdk-go v1.25.36/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod 
h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc= @@ -77,19 +74,13 @@ github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+Ce github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/bifurcation/mint v0.0.0-20180715133206-93c51c6ce115/go.mod h1:zVt7zX3K/aDCk9Tj+VM7YymsX66ERvzCJzw8rFCX2JU= -github.com/caddyserver/caddy v1.0.3 h1:i9gRhBgvc5ifchwWtSe7pDpsdS9+Q0Rw9oYQmYUTw1w= -github.com/caddyserver/caddy v1.0.3/go.mod h1:G+ouvOY32gENkJC+jhgl62TyhvqEsFaDiZ4uw0RzP1E= github.com/caddyserver/caddy v1.0.4 h1:wwuGSkUHo6RZ3oMpeTt7J09WBB87X5o+IZN4dKehcQE= github.com/caddyserver/caddy v1.0.4/go.mod h1:uruyfVsyMcDb3IOzSKsi1x0wOjy1my/PxOSTcD+24jM= -github.com/cenkalti/backoff v2.1.1+incompatible h1:tKJnvO2kl0zmb/jA5UKAt4VoEVw1qxKWjE/Bpp46npY= -github.com/cenkalti/backoff v2.1.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.1.0 h1:yTUvW7Vhb89inJ+8irsUqiWjh8iT6sQPZiQzI6ReGkA= github.com/cespare/xxhash/v2 v2.1.0/go.mod h1:dgIUBU3pDso/gPgZ1osOZ0iQf77oPR28Tjxl5dIMyVM= -github.com/cheekybits/genny v0.0.0-20170328200008-9127e812e1e9/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ= github.com/cheekybits/genny v1.0.0/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ= github.com/client9/misspell v0.3.4/go.mod 
h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cloudflare/cloudflare-go v0.10.2/go.mod h1:qhVI5MKwBGhdNU89ZRz2plgYutcJ5PCekLxXn56w6SY= @@ -147,8 +138,6 @@ github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-acme/lego v2.5.0+incompatible h1:5fNN9yRQfv8ymH3DSsxla+4aYeQt2IgfZqHKVnK8f0s= -github.com/go-acme/lego v2.5.0+incompatible/go.mod h1:yzMNe9CasVUhkquNvti5nAtPmG94USbYxYrZfTkIn0M= github.com/go-acme/lego/v3 v3.1.0/go.mod h1:074uqt+JS6plx+c9Xaiz6+L+GBb+7itGtzfcDM2AhEE= github.com/go-acme/lego/v3 v3.2.0/go.mod h1:074uqt+JS6plx+c9Xaiz6+L+GBb+7itGtzfcDM2AhEE= github.com/go-cmd/cmd v1.0.5/go.mod h1:y8q8qlK5wQibcw63djSl/ntiHUHXHGdCkPk0j4QeW4s= @@ -207,8 +196,7 @@ github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsC github.com/googleapis/gnostic v0.2.0 h1:l6N3VoaVzTncYYW+9yOz2LJJammFZGBO13sqgEhpy9g= github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/gophercloud/gophercloud v0.0.0-20190126172459-c818fa66e4c8/go.mod h1:3WdhXV3rUYy9p6AUW8d94kr+HS62Y4VL9mBnFxsD8q4= -github.com/gophercloud/gophercloud v0.0.0-20190307220656-fe1ba5ce12dd h1:tkA3C/XTk8iACLOlTez37pL+0iGSYkkRGKdXgJ6ZylM= -github.com/gophercloud/gophercloud v0.0.0-20190307220656-fe1ba5ce12dd/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= +github.com/gophercloud/gophercloud v0.3.0 h1:6sjpKIpVwRIIwmcEGp+WwNovNsem+c+2vm6oxshRpL8= github.com/gophercloud/gophercloud v0.3.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= github.com/gopherjs/gopherjs 
v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= @@ -229,10 +217,10 @@ github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 h1 github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542/go.mod h1:Ow0tF8D4Kplbc8s8sSb3V2oUCygFHVp8gC3Dn6U4MNI= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= -github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.3 h1:YPkqC67at8FYaadspW/6uE0COsBxS2656RLEr8Bppgk= github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= @@ -278,14 +266,9 @@ github.com/labbsr0x/bindman-dns-webhook v1.0.2/go.mod h1:p6b+VCXIR8NYKpDr8/dg1HK github.com/labbsr0x/goh v1.0.1/go.mod h1:8K2UhVoaWXcCU7Lxoa2omWnC8gyW8px7/lmO61c027w= github.com/linode/linodego v0.10.0/go.mod h1:cziNP7pbvE3mXIPneHj0oRY8L1WtGEIKlZ8LANE4eXA= github.com/liquidweb/liquidweb-go v1.6.0/go.mod h1:UDcVnAMDkZxpw4Y7NOHkqoeiGacVLEIG/i5J9cyixzQ= -github.com/lucas-clemente/aes12 v0.0.0-20171027163421-cd47fb39b79f/go.mod 
h1:JpH9J1c9oX6otFSgdUHwUBUizmKlrMjxWnIAjff4m04= -github.com/lucas-clemente/quic-clients v0.1.0/go.mod h1:y5xVIEoObKqULIKivu+gD/LU90pL73bTdtQjPBvtCBk= -github.com/lucas-clemente/quic-go v0.10.2/go.mod h1:hvaRS9IHjFLMq76puFJeWNfmn+H70QZ/CXoxqw9bzao= github.com/lucas-clemente/quic-go v0.13.1/go.mod h1:Vn3/Fb0/77b02SGhQk36KzOUmXgVpFfizUfW5WMaqyU= -github.com/lucas-clemente/quic-go-certificates v0.0.0-20160823095156-d2f86524cced/go.mod h1:NCcRLrOTZbzhZvixZLlERbJtDtYsmMw8Jc4vS8Z0g58= github.com/marten-seemann/chacha20 v0.2.0/go.mod h1:HSdjFau7GzYRj+ahFNwsO3ouVJr1HFkWoEwNDb4TMtE= github.com/marten-seemann/qpack v0.1.0/go.mod h1:LFt1NU/Ptjip0C2CPkhimBz5CGE3WGDAUWqna+CNTrI= -github.com/marten-seemann/qtls v0.2.3/go.mod h1:xzjG7avBwGGbdZ8dTGxlBnLArsVKLvwmjgmPuiQEcYk= github.com/marten-seemann/qtls v0.4.1/go.mod h1:pxVXcHHw1pNIt8Qo0pwSYQEoZ8yYOOPXTCZLQQunvRc= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= @@ -295,8 +278,6 @@ github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzp github.com/mattn/go-tty v0.0.0-20180219170247-931426f7535a/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/mholt/certmagic v0.6.2-0.20190624175158-6a42ef9fe8c2 h1:xKE9kZ5C8gelJC3+BNM6LJs1x21rivK7yxfTZMAuY2s= -github.com/mholt/certmagic v0.6.2-0.20190624175158-6a42ef9fe8c2/go.mod h1:g4cOPxcjV0oFq3qwpjSA30LReKD8AoIfwAY9VvG35NY= github.com/mholt/certmagic v0.8.3/go.mod h1:91uJzK5K8IWtYQqTi5R2tsxV1pCde+wdGfaRaOZi6aQ= github.com/miekg/dns v1.1.15/go.mod 
h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.22 h1:Jm64b3bO9kP43ddLjL2EY3Io6bmy1qGb9Xxz6TqS6rc= @@ -323,13 +304,11 @@ github.com/nrdcg/namesilo v0.2.1/go.mod h1:lwMvfQTyYq+BbjJd30ylEG4GPSS6PII0Tia4r github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.8.0 h1:VkHVNpR4iVnU8XQR6DBm8BqYjN7CRzw+xKUbVVbbW9w= -github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega v0.0.0-20190113212917-5533ce8a0da3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo= -github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492 h1:lM6RxxfUMrYL/f8bWEUqdXrANWtrL7Nndbm9iFN0DlU= github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU= @@ -403,6 +382,7 @@ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= 
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/timewasted/linode v0.0.0-20160829202747-37e84520dcf7/go.mod h1:imsgLplxEC/etjIhdr3dNzV3JeT27LbVu5pYWm0JCBY= github.com/tinylib/msgp v1.1.0 h1:9fQd+ICuRIu/ue4vxJZu6/LzxN0HwMds2nq/0cFvxHU= @@ -439,9 +419,7 @@ go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= golang.org/x/crypto v0.0.0-20180621125126-a49355c7e3f8/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181025213731-e84da0312774/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190123085648-057139ce5d2b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190228161510-8dd112bcdc25/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190418165655-df01cb2cc480/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -474,7 +452,6 @@ golang.org/x/net v0.0.0-20190206173232-65e2d4e15006/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190228165749-92fc7df08ae7/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190328230028-74de082e2cca/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -506,10 +483,8 @@ golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190124100055-b90733256f2e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -537,6 +512,7 @@ golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 
h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190921001708-c4c64cad1fd0 h1:xQwXv67TxFo9nC1GJFyab5eq/5B590r6RlnL/G8Sz7w= golang.org/x/time v0.0.0-20190921001708-c4c64cad1fd0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -560,8 +536,6 @@ google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMt google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0 h1:Q3Ui3V3/CVinFWFiW39Iw0kMuVrRzYX0wN6OPFp0lTA= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.14.0 h1:uMf5uLi4eQMRrMKhCplNik4U4H8Z6C1br3zOtAa/aDE= google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= @@ -608,8 +582,6 @@ gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24 gopkg.in/ns1/ns1-go.v2 v2.0.0-20190730140822-b51389932cbc/go.mod h1:VV+3haRsgDiVLxyifmMBrBIuCWFBPYKbRssXB9z67Hw= gopkg.in/resty.v1 v1.9.1/go.mod h1:vo52Hzryw9PnPHcJfPsBiFW62XhNx5OczbV9y+IMpgc= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/square/go-jose.v2 v2.2.2 h1:orlkJ3myw8CN1nVQHBFfloD+L3egixIa4FvUP6RosSA= -gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= 
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= diff --git a/man/coredns-clouddns.7 b/man/coredns-clouddns.7 index c09af441edc..a0cf3b23ac5 100644 --- a/man/coredns-clouddns.7 +++ b/man/coredns-clouddns.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-CLOUDDNS" 7 "September 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-CLOUDDNS" 7 "November 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP @@ -76,7 +76,7 @@ Enable clouddns with fallthrough: .nf example.org { - clouddns example.org.:gcp\-example\-project:example\-zone clouddns example.com.:gcp\-example\-project:example\-zone\-2 { + clouddns example.org.:gcp\-example\-project:example\-zone example.com.:gcp\-example\-project:example\-zone\-2 { fallthrough example.gov. } } diff --git a/man/coredns-sign.7 b/man/coredns-sign.7 index 4f7d02c2845..bed4ff11d6d 100644 --- a/man/coredns-sign.7 +++ b/man/coredns-sign.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-SIGN" 7 "October 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-SIGN" 7 "November 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP From 029b8c11f1a48a0c5a2a37e7a94a0bb13ecc00c2 Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 25 Nov 2019 07:59:32 +0000 Subject: [PATCH 281/324] build(deps): bump github.com/Azure/azure-sdk-for-go (#3482) Bumps [github.com/Azure/azure-sdk-for-go](https://github.com/Azure/azure-sdk-for-go) from 32.4.0+incompatible to 32.6.0+incompatible. 
- [Release notes](https://github.com/Azure/azure-sdk-for-go/releases) - [Changelog](https://github.com/Azure/azure-sdk-for-go/blob/master/CHANGELOG.md) - [Commits](https://github.com/Azure/azure-sdk-for-go/compare/v32.4.0...v32.6.0) Signed-off-by: dependabot-preview[bot] --- go.mod | 2 +- go.sum | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index d1ed096696a..d993b3fe091 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ go 1.12 require ( cloud.google.com/go v0.41.0 // indirect - github.com/Azure/azure-sdk-for-go v32.4.0+incompatible + github.com/Azure/azure-sdk-for-go v32.6.0+incompatible github.com/Azure/go-autorest/autorest v0.9.2 github.com/Azure/go-autorest/autorest/azure/auth v0.4.0 github.com/DataDog/datadog-go v2.2.0+incompatible // indirect diff --git a/go.sum b/go.sum index 12d439fb538..0c0c4ffb94f 100644 --- a/go.sum +++ b/go.sum @@ -6,6 +6,8 @@ cloud.google.com/go v0.41.0/go.mod h1:OauMR7DV8fzvZIl2qg6rkaIhD/vmgk4iwEw/h6ercm contrib.go.opencensus.io/exporter/ocagent v0.4.12/go.mod h1:450APlNTSR6FrvC3CTRqYosuDstRB9un7SOx2k/9ckA= github.com/Azure/azure-sdk-for-go v32.4.0+incompatible h1:1JP8SKfroEakYiQU2ZyPDosh8w2Tg9UopKt88VyQPt4= github.com/Azure/azure-sdk-for-go v32.4.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go v32.6.0+incompatible h1:PgaVceWF5idtJajyt1rzq1cep6eRPJ8+8hs4GnNzTo0= +github.com/Azure/azure-sdk-for-go v32.6.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/go-autorest v13.0.0+incompatible h1:56c11ykhsFSPNNQuS73Ri8h/ezqVhr2h6t9LJIEKVO0= github.com/Azure/go-autorest v13.0.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest/autorest v0.1.0/go.mod h1:AKyIcETwSUFxIcs/Wnq/C+kwCtlEYGUVd7FPNb2slmg= From d56dbffa95963f1e0ca14c0f75e42c816c0783a8 Mon Sep 17 
00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 25 Nov 2019 07:59:52 +0000 Subject: [PATCH 282/324] build(deps): bump github.com/aws/aws-sdk-go from 1.25.36 to 1.25.41 (#3483) Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.25.36 to 1.25.41. - [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Changelog](https://github.com/aws/aws-sdk-go/blob/master/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.25.36...v1.25.41) Signed-off-by: dependabot-preview[bot] --- go.mod | 2 +- go.sum | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index d993b3fe091..c5918c17edc 100644 --- a/go.mod +++ b/go.mod @@ -9,7 +9,7 @@ require ( github.com/Azure/go-autorest/autorest/azure/auth v0.4.0 github.com/DataDog/datadog-go v2.2.0+incompatible // indirect github.com/Shopify/sarama v1.21.0 // indirect - github.com/aws/aws-sdk-go v1.25.36 + github.com/aws/aws-sdk-go v1.25.41 github.com/caddyserver/caddy v1.0.4 github.com/coredns/federation v0.0.0-20190818181423-e032b096babe github.com/coreos/go-systemd v0.0.0-20190212144455-93d5ec2c7f76 // indirect diff --git a/go.sum b/go.sum index 0c0c4ffb94f..9bec1f3c442 100644 --- a/go.sum +++ b/go.sum @@ -70,6 +70,8 @@ github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb github.com/aws/aws-sdk-go v1.23.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.25.36 h1:4+TL/Y2G5hsR1zdfHmjNG1ou1WEqsSWk8v7m1GaDKyo= github.com/aws/aws-sdk-go v1.25.36/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.25.41 h1:/hj7nZ0586wFqpwjNpzWiUTwtaMgxAZNZKHay80MdXw= +github.com/aws/aws-sdk-go v1.25.41/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= 
github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= From 24176a97e65e65a768e4e06b192f38da511d1025 Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Fri, 29 Nov 2019 13:17:05 +0000 Subject: [PATCH 283/324] Move to CODEOWNERS (#3489) * Move to CODEOWNERS No change in who own what; just a move to CODEOWNERS. This allows dreck cleanups. Added .dreck.yaml for alias and exec. Fixes: #3486 Signed-off-by: Miek Gieben * stickler bot Signed-off-by: Miek Gieben * sort the file Signed-off-by: Miek Gieben --- .dreck.yaml | 11 +++++ CODEOWNERS | 44 ++++++++++++++++++ OWNERS | 32 ------------- owners_generate.go | 92 ++++++++++++++++++-------------------- plugin/acl/OWNERS | 6 --- plugin/any/OWNERS | 4 -- plugin/auto/OWNERS | 5 --- plugin/autopath/OWNERS | 6 --- plugin/azure/OWNERS | 8 ---- plugin/bind/OWNERS | 4 -- plugin/bufsize/OWNERS | 4 -- plugin/cache/OWNERS | 6 --- plugin/cancel/OWNERS | 4 -- plugin/chaos/OWNERS | 4 -- plugin/clouddns/OWNERS | 6 --- plugin/dnssec/OWNERS | 6 --- plugin/dnstap/OWNERS | 6 --- plugin/erratic/OWNERS | 4 -- plugin/errors/OWNERS | 4 -- plugin/etcd/OWNERS | 6 --- plugin/file/OWNERS | 7 --- plugin/forward/OWNERS | 10 ----- plugin/grpc/OWNERS | 6 --- plugin/health/OWNERS | 6 --- plugin/hosts/OWNERS | 6 --- plugin/k8s_external/OWNERS | 4 -- plugin/kubernetes/OWNERS | 14 ------ plugin/loadbalance/OWNERS | 4 -- plugin/log/OWNERS | 6 --- plugin/loop/OWNERS | 6 --- plugin/metadata/OWNERS | 6 --- plugin/metrics/OWNERS | 9 ---- plugin/nsid/OWNERS | 4 -- plugin/pprof/OWNERS | 4 -- plugin/reload/OWNERS | 4 -- plugin/rewrite/OWNERS | 6 --- plugin/root/OWNERS | 4 -- plugin/route53/OWNERS | 6 --- plugin/secondary/OWNERS | 6 --- plugin/template/OWNERS | 4 -- 
plugin/tls/OWNERS | 4 -- plugin/trace/OWNERS | 4 -- plugin/transfer/OWNERS | 6 --- plugin/whoami/OWNERS | 6 --- 44 files changed, 98 insertions(+), 306 deletions(-) create mode 100644 .dreck.yaml create mode 100644 CODEOWNERS delete mode 100644 OWNERS delete mode 100644 plugin/acl/OWNERS delete mode 100644 plugin/any/OWNERS delete mode 100644 plugin/auto/OWNERS delete mode 100644 plugin/autopath/OWNERS delete mode 100644 plugin/azure/OWNERS delete mode 100644 plugin/bind/OWNERS delete mode 100644 plugin/bufsize/OWNERS delete mode 100644 plugin/cache/OWNERS delete mode 100644 plugin/cancel/OWNERS delete mode 100644 plugin/chaos/OWNERS delete mode 100644 plugin/clouddns/OWNERS delete mode 100644 plugin/dnssec/OWNERS delete mode 100644 plugin/dnstap/OWNERS delete mode 100644 plugin/erratic/OWNERS delete mode 100644 plugin/errors/OWNERS delete mode 100644 plugin/etcd/OWNERS delete mode 100644 plugin/file/OWNERS delete mode 100644 plugin/forward/OWNERS delete mode 100644 plugin/grpc/OWNERS delete mode 100644 plugin/health/OWNERS delete mode 100644 plugin/hosts/OWNERS delete mode 100644 plugin/k8s_external/OWNERS delete mode 100644 plugin/kubernetes/OWNERS delete mode 100644 plugin/loadbalance/OWNERS delete mode 100644 plugin/log/OWNERS delete mode 100644 plugin/loop/OWNERS delete mode 100644 plugin/metadata/OWNERS delete mode 100644 plugin/metrics/OWNERS delete mode 100644 plugin/nsid/OWNERS delete mode 100644 plugin/pprof/OWNERS delete mode 100644 plugin/reload/OWNERS delete mode 100644 plugin/rewrite/OWNERS delete mode 100644 plugin/root/OWNERS delete mode 100644 plugin/route53/OWNERS delete mode 100644 plugin/secondary/OWNERS delete mode 100644 plugin/template/OWNERS delete mode 100644 plugin/tls/OWNERS delete mode 100644 plugin/trace/OWNERS delete mode 100644 plugin/transfer/OWNERS delete mode 100644 plugin/whoami/OWNERS diff --git a/.dreck.yaml b/.dreck.yaml new file mode 100644 index 00000000000..12c8a4c4be8 --- /dev/null +++ b/.dreck.yaml @@ -0,0 +1,11 @@ 
+features: + - aliases + - exec + +aliases: + - | + /plugin (.*) -> /label plugin/$1 + - | + /wai -> /label works as intended + - | + /release (.*) -> /exec /opt/bin/release-coredns $1 diff --git a/CODEOWNERS b/CODEOWNERS new file mode 100644 index 00000000000..67c24cd5df9 --- /dev/null +++ b/CODEOWNERS @@ -0,0 +1,44 @@ +# @miekg, miek@miek.nl, project lead: 11/11/2020 + +* @bradbeam @chrisohaver @dilyevsky @fastest963 @greenpau @grobie @isolus @johnbelamaric @miekg @pmoroney @rajansandeep @stp-ip @superq @yongtang + +/plugin/acl/ @miekg @ihac +/plugin/any/ @miekg +/plugin/auto/ @miekg @stp-ip +/plugin/autopath/ @chrisohaver @miekg +/plugin/azure/ @miekg @yongtang @darshanime +/plugin/bind/ @miekg +/plugin/bufsize/ @ykhr53 +/plugin/cache/ @grobie @miekg +/plugin/cancel/ @miekg +/plugin/chaos/ @miekg +/plugin/clouddns/ @miekg @yongtang +/plugin/dnssec/ @isolus @miekg +/plugin/dnstap/ @varyoo @yongtang +/plugin/erratic/ @miekg +/plugin/errors/ @miekg +/plugin/etcd/ @miekg @nitisht +/plugin/file/ @miekg @yongtang @stp-ip +/plugin/forward/ @grobie @johnbelamaric @miekg @rdrozhdzh +/plugin/grpc/ @inigohu @miekg +/plugin/health/ @fastest963 @miekg +/plugin/hosts/ @johnbelamaric @pmoroney +/plugin/k8s_external/ @miekg +/plugin/kubernetes/ @bradbeam @chrisohaver @johnbelamaric @miekg @rajansandeep @yongtang +/plugin/loadbalance/ @miekg +/plugin/log/ @miekg @nchrisdk +/plugin/loop/ @miekg @chrisohaver +/plugin/metadata/ @ekleiner @miekg +/plugin/metrics/ @fastest963 @miekg @superq @greenpau +/plugin/nsid/ @yongtang +/plugin/pprof/ @miekg +/plugin/reload/ @johnbelamaric +/plugin/rewrite/ @greenpau @johnbelamaric +/plugin/root/ @miekg +/plugin/route53/ @yongtang @dilyevsky +/plugin/secondary/ @bradbeam @miekg +/plugin/template/ @rtreffer +/plugin/tls/ @johnbelamaric +/plugin/trace/ @johnbelamaric +/plugin/transfer/ @miekg @chrisohaver +/plugin/whoami/ @miekg @chrisohaver diff --git a/OWNERS b/OWNERS deleted file mode 100644 index 7807a965a9c..00000000000 --- a/OWNERS +++ 
/dev/null @@ -1,32 +0,0 @@ -approvers: - - bradbeam - - chrisohaver # Infoblox | cohaver@infoblox.com - - dilyevsky - - fastest963 - - greenpau - - grobie - - isolus - - johnbelamaric - - miekg # miek@miek.nl | project lead: 11/11/2020 - - pmoroney - - rajansandeep - - stp-ip - - superq - - yongtang - -features: - - comments - - reviewers - - aliases - - branches - - exec - -aliases: - - | - /plugin: (.*) -> /label add: plugin/$1 - - | - /approve -> /lgtm - - | - /wai -> /label add: works as intended - - | - /release: (.*) -> /exec: /opt/bin/release-coredns $1 diff --git a/owners_generate.go b/owners_generate.go index 10160febdac..cc62082950e 100644 --- a/owners_generate.go +++ b/owners_generate.go @@ -5,58 +5,29 @@ package main import ( + "bufio" "fmt" "io/ioutil" "log" "os" - "path" - "path/filepath" "sort" - - "gopkg.in/yaml.v2" + "strings" ) func main() { - o := map[string]struct{}{} - // top-level OWNERS file - o, err := owners("OWNERS", o) + o, err := owners("CODEOWNERS") if err != nil { log.Fatal(err) } - // each of the plugins, in case someone is not in the top-level one - err = filepath.Walk("plugin", - func(p string, i os.FileInfo, err error) error { - if err != nil { - return err - } - if i.IsDir() { - return nil - } - if path.Base(p) != "OWNERS" { - return nil - } - o, err = owners(p, o) - return err - }) - if err != nil { - log.Fatal(err) - } - - // sort it and format it - list := []string{} - for k := range o { - list = append(list, k) - } - sort.Strings(list) golist := `package chaos // Owners are all GitHub handlers of all maintainers. var Owners = []string{` c := ", " - for i, a := range list { - if i == len(list)-1 { + for i, a := range o { + if i == len(o)-1 { c = "}" } golist += fmt.Sprintf("%q%s", a, c) @@ -70,27 +41,50 @@ var Owners = []string{` return } -// owners parses a owner file without knowing a whole lot about its structure. 
-func owners(path string, owners map[string]struct{}) (map[string]struct{}, error) { - file, err := ioutil.ReadFile(path) - if err != nil { - return nil, err - } - c := yaml.MapSlice{} - err = yaml.Unmarshal(file, &c) +func owners(path string) ([]string, error) { + // simple line, by line based format + // + // # In this example, @doctocat owns any files in the build/logs + // # directory at the root of the repository and any of its + // # subdirectories. + // /build/logs/ @doctocat + f, err := os.Open(path) if err != nil { return nil, err } - for _, mi := range c { - key, ok := mi.Key.(string) - if !ok { + scanner := bufio.NewScanner(f) + users := map[string]struct{}{} + for scanner.Scan() { + text := scanner.Text() + if len(text) == 0 { + continue + } + if text[0] == '#' { + continue + } + ele := strings.Fields(text) + if len(ele) == 0 { continue } - if key == "approvers" { - for _, k := range mi.Value.([]interface{}) { - owners[k.(string)] = struct{}{} + + // ok ele[0] is the path, the rest are (in our case) github usernames prefixed with @ + for _, s := range ele[1:] { + if len(s) <= 1 { + continue } + users[s[1:]] = struct{}{} + } + } + if err := scanner.Err(); err != nil { + return nil, err + } + u := []string{} + for k := range users { + if strings.HasPrefix(k, "@") { + k = k[1:] } + u = append(u, k) } - return owners, nil + sort.Strings(u) + return u, nil } diff --git a/plugin/acl/OWNERS b/plugin/acl/OWNERS deleted file mode 100644 index f43391c4352..00000000000 --- a/plugin/acl/OWNERS +++ /dev/null @@ -1,6 +0,0 @@ -reviewers: - - miekg - - ihac -approvers: - - miekg - - ihac diff --git a/plugin/any/OWNERS b/plugin/any/OWNERS deleted file mode 100644 index eee46f68652..00000000000 --- a/plugin/any/OWNERS +++ /dev/null @@ -1,4 +0,0 @@ -reviewers: - - miekg -approvers: - - miekg diff --git a/plugin/auto/OWNERS b/plugin/auto/OWNERS deleted file mode 100644 index 3fc6bad85ed..00000000000 --- a/plugin/auto/OWNERS +++ /dev/null @@ -1,5 +0,0 @@ -reviewers: - - 
miekg - - stp-ip -approvers: - - miekg diff --git a/plugin/autopath/OWNERS b/plugin/autopath/OWNERS deleted file mode 100644 index 187c629c943..00000000000 --- a/plugin/autopath/OWNERS +++ /dev/null @@ -1,6 +0,0 @@ -reviewers: - - chrisohaver - - miekg -approvers: - - chrisohaver - - miekg diff --git a/plugin/azure/OWNERS b/plugin/azure/OWNERS deleted file mode 100644 index c30e67ae3fd..00000000000 --- a/plugin/azure/OWNERS +++ /dev/null @@ -1,8 +0,0 @@ -reviewers: - - miekg - - yongtang - - darshanime -approvers: - - miekg - - yongtang - - darshanime diff --git a/plugin/bind/OWNERS b/plugin/bind/OWNERS deleted file mode 100644 index eee46f68652..00000000000 --- a/plugin/bind/OWNERS +++ /dev/null @@ -1,4 +0,0 @@ -reviewers: - - miekg -approvers: - - miekg diff --git a/plugin/bufsize/OWNERS b/plugin/bufsize/OWNERS deleted file mode 100644 index e40116eae1a..00000000000 --- a/plugin/bufsize/OWNERS +++ /dev/null @@ -1,4 +0,0 @@ -reviewers: - - ykhr53 -approvers: - - ykhr53 \ No newline at end of file diff --git a/plugin/cache/OWNERS b/plugin/cache/OWNERS deleted file mode 100644 index 6b9f2f0df23..00000000000 --- a/plugin/cache/OWNERS +++ /dev/null @@ -1,6 +0,0 @@ -reviewers: - - grobie - - miekg -approvers: - - grobie - - miekg diff --git a/plugin/cancel/OWNERS b/plugin/cancel/OWNERS deleted file mode 100644 index eee46f68652..00000000000 --- a/plugin/cancel/OWNERS +++ /dev/null @@ -1,4 +0,0 @@ -reviewers: - - miekg -approvers: - - miekg diff --git a/plugin/chaos/OWNERS b/plugin/chaos/OWNERS deleted file mode 100644 index eee46f68652..00000000000 --- a/plugin/chaos/OWNERS +++ /dev/null @@ -1,4 +0,0 @@ -reviewers: - - miekg -approvers: - - miekg diff --git a/plugin/clouddns/OWNERS b/plugin/clouddns/OWNERS deleted file mode 100644 index 716e8641ad1..00000000000 --- a/plugin/clouddns/OWNERS +++ /dev/null @@ -1,6 +0,0 @@ -reviewers: - - miekg - - yongtang -approvers: - - miekg - - yongtang diff --git a/plugin/dnssec/OWNERS b/plugin/dnssec/OWNERS deleted file mode 100644 
index 1bdb8e3d565..00000000000 --- a/plugin/dnssec/OWNERS +++ /dev/null @@ -1,6 +0,0 @@ -reviewers: - - isolus - - miekg -approvers: - - isolus - - miekg diff --git a/plugin/dnstap/OWNERS b/plugin/dnstap/OWNERS deleted file mode 100644 index 6f67242973d..00000000000 --- a/plugin/dnstap/OWNERS +++ /dev/null @@ -1,6 +0,0 @@ -reviewers: - - varyoo - - yongtang -approvers: - - varyoo - - yongtang diff --git a/plugin/erratic/OWNERS b/plugin/erratic/OWNERS deleted file mode 100644 index eee46f68652..00000000000 --- a/plugin/erratic/OWNERS +++ /dev/null @@ -1,4 +0,0 @@ -reviewers: - - miekg -approvers: - - miekg diff --git a/plugin/errors/OWNERS b/plugin/errors/OWNERS deleted file mode 100644 index eee46f68652..00000000000 --- a/plugin/errors/OWNERS +++ /dev/null @@ -1,4 +0,0 @@ -reviewers: - - miekg -approvers: - - miekg diff --git a/plugin/etcd/OWNERS b/plugin/etcd/OWNERS deleted file mode 100644 index 256b53f9987..00000000000 --- a/plugin/etcd/OWNERS +++ /dev/null @@ -1,6 +0,0 @@ -reviewers: - - miekg - - nitisht -approvers: - - miekg - - nitisht diff --git a/plugin/file/OWNERS b/plugin/file/OWNERS deleted file mode 100644 index b69d5c74f66..00000000000 --- a/plugin/file/OWNERS +++ /dev/null @@ -1,7 +0,0 @@ -reviewers: - - miekg - - yongtang - - stp-ip -approvers: - - miekg - - yongtang diff --git a/plugin/forward/OWNERS b/plugin/forward/OWNERS deleted file mode 100644 index 201f579921c..00000000000 --- a/plugin/forward/OWNERS +++ /dev/null @@ -1,10 +0,0 @@ -reviewers: - - grobie - - johnbelamaric - - miekg - - rdrozhdzh -approvers: - - grobie - - johnbelamaric - - rdrozhdzh - - miekg diff --git a/plugin/grpc/OWNERS b/plugin/grpc/OWNERS deleted file mode 100644 index 7b778f5bdb4..00000000000 --- a/plugin/grpc/OWNERS +++ /dev/null @@ -1,6 +0,0 @@ -reviewers: - - inigohu - - miekg -approvers: - - inigohu - - miekg diff --git a/plugin/health/OWNERS b/plugin/health/OWNERS deleted file mode 100644 index d909fd4511c..00000000000 --- a/plugin/health/OWNERS +++ /dev/null @@ 
-1,6 +0,0 @@ -reviewers: - - fastest963 - - miekg -approvers: - - fastest963 - - miekg diff --git a/plugin/hosts/OWNERS b/plugin/hosts/OWNERS deleted file mode 100644 index ae6484f9d45..00000000000 --- a/plugin/hosts/OWNERS +++ /dev/null @@ -1,6 +0,0 @@ -reviewers: - - johnbelamaric - - pmoroney -approvers: - - johnbelamaric - - pmoroney diff --git a/plugin/k8s_external/OWNERS b/plugin/k8s_external/OWNERS deleted file mode 100644 index eee46f68652..00000000000 --- a/plugin/k8s_external/OWNERS +++ /dev/null @@ -1,4 +0,0 @@ -reviewers: - - miekg -approvers: - - miekg diff --git a/plugin/kubernetes/OWNERS b/plugin/kubernetes/OWNERS deleted file mode 100644 index e662359c926..00000000000 --- a/plugin/kubernetes/OWNERS +++ /dev/null @@ -1,14 +0,0 @@ -reviewers: - - bradbeam - - chrisohaver - - johnbelamaric - - miekg - - rajansandeep - - yongtang -approvers: - - bradbeam - - chrisohaver - - johnbelamaric - - miekg - - rajansandeep - - yongtang diff --git a/plugin/loadbalance/OWNERS b/plugin/loadbalance/OWNERS deleted file mode 100644 index eee46f68652..00000000000 --- a/plugin/loadbalance/OWNERS +++ /dev/null @@ -1,4 +0,0 @@ -reviewers: - - miekg -approvers: - - miekg diff --git a/plugin/log/OWNERS b/plugin/log/OWNERS deleted file mode 100644 index 06032ae2e09..00000000000 --- a/plugin/log/OWNERS +++ /dev/null @@ -1,6 +0,0 @@ -reviewers: - - miekg - - nchrisdk -approvers: - - miekg - - nchrisdk diff --git a/plugin/loop/OWNERS b/plugin/loop/OWNERS deleted file mode 100644 index 3a4ef23a1f6..00000000000 --- a/plugin/loop/OWNERS +++ /dev/null @@ -1,6 +0,0 @@ -reviewers: - - miekg - - chrisohaver -approvers: - - miekg - - chrisohaver diff --git a/plugin/metadata/OWNERS b/plugin/metadata/OWNERS deleted file mode 100644 index 6d13ad9f14f..00000000000 --- a/plugin/metadata/OWNERS +++ /dev/null @@ -1,6 +0,0 @@ -reviewers: - - ekleiner - - miekg -approvers: - - ekleiner - - miekg diff --git a/plugin/metrics/OWNERS b/plugin/metrics/OWNERS deleted file mode 100644 index 
1b071c69eeb..00000000000 --- a/plugin/metrics/OWNERS +++ /dev/null @@ -1,9 +0,0 @@ -reviewers: - - fastest963 - - miekg - - superq - - greenpau -approvers: - - fastest963 - - miekg - - superq diff --git a/plugin/nsid/OWNERS b/plugin/nsid/OWNERS deleted file mode 100644 index 4e0ca6d99d2..00000000000 --- a/plugin/nsid/OWNERS +++ /dev/null @@ -1,4 +0,0 @@ -reviewers: - - yongtang -approvers: - - yongtang diff --git a/plugin/pprof/OWNERS b/plugin/pprof/OWNERS deleted file mode 100644 index eee46f68652..00000000000 --- a/plugin/pprof/OWNERS +++ /dev/null @@ -1,4 +0,0 @@ -reviewers: - - miekg -approvers: - - miekg diff --git a/plugin/reload/OWNERS b/plugin/reload/OWNERS deleted file mode 100644 index f7f9ca271ae..00000000000 --- a/plugin/reload/OWNERS +++ /dev/null @@ -1,4 +0,0 @@ -reviewers: - - johnbelamaric -approvers: - - johnbelamaric diff --git a/plugin/rewrite/OWNERS b/plugin/rewrite/OWNERS deleted file mode 100644 index b77031d8616..00000000000 --- a/plugin/rewrite/OWNERS +++ /dev/null @@ -1,6 +0,0 @@ -reviewers: - - greenpau - - johnbelamaric -approvers: - - greenpau - - johnbelamaric diff --git a/plugin/root/OWNERS b/plugin/root/OWNERS deleted file mode 100644 index eee46f68652..00000000000 --- a/plugin/root/OWNERS +++ /dev/null @@ -1,4 +0,0 @@ -reviewers: - - miekg -approvers: - - miekg diff --git a/plugin/route53/OWNERS b/plugin/route53/OWNERS deleted file mode 100644 index aba4fe3da80..00000000000 --- a/plugin/route53/OWNERS +++ /dev/null @@ -1,6 +0,0 @@ -reviewers: - - yongtang - - dilyevsky -approvers: - - yongtang - - dilyevsky diff --git a/plugin/secondary/OWNERS b/plugin/secondary/OWNERS deleted file mode 100644 index 252bba86cd9..00000000000 --- a/plugin/secondary/OWNERS +++ /dev/null @@ -1,6 +0,0 @@ -reviewers: - - bradbeam - - miekg -approvers: - - bradbeam - - miekg diff --git a/plugin/template/OWNERS b/plugin/template/OWNERS deleted file mode 100644 index b97eccf6238..00000000000 --- a/plugin/template/OWNERS +++ /dev/null @@ -1,4 +0,0 @@ 
-reviewers: - - rtreffer -approvers: - - rtreffer diff --git a/plugin/tls/OWNERS b/plugin/tls/OWNERS deleted file mode 100644 index f7f9ca271ae..00000000000 --- a/plugin/tls/OWNERS +++ /dev/null @@ -1,4 +0,0 @@ -reviewers: - - johnbelamaric -approvers: - - johnbelamaric diff --git a/plugin/trace/OWNERS b/plugin/trace/OWNERS deleted file mode 100644 index f7f9ca271ae..00000000000 --- a/plugin/trace/OWNERS +++ /dev/null @@ -1,4 +0,0 @@ -reviewers: - - johnbelamaric -approvers: - - johnbelamaric diff --git a/plugin/transfer/OWNERS b/plugin/transfer/OWNERS deleted file mode 100644 index 3a4ef23a1f6..00000000000 --- a/plugin/transfer/OWNERS +++ /dev/null @@ -1,6 +0,0 @@ -reviewers: - - miekg - - chrisohaver -approvers: - - miekg - - chrisohaver diff --git a/plugin/whoami/OWNERS b/plugin/whoami/OWNERS deleted file mode 100644 index 3a4ef23a1f6..00000000000 --- a/plugin/whoami/OWNERS +++ /dev/null @@ -1,6 +0,0 @@ -reviewers: - - miekg - - chrisohaver -approvers: - - miekg - - chrisohaver From b4df2d0d4c2d2014b113f9f8caf1f1b2ed6dc192 Mon Sep 17 00:00:00 2001 From: Gonzalo Paniagua Javier Date: Fri, 29 Nov 2019 11:17:50 -0400 Subject: [PATCH 284/324] Add a serve_stale option for plugin/cache (#3468) Automatically submitted. --- plugin/cache/README.md | 6 ++++ plugin/cache/cache.go | 2 ++ plugin/cache/cache_test.go | 54 ++++++++++++++++++++++++++++++-- plugin/cache/handler.go | 63 +++++++++++++++++++++++++++++++------- plugin/cache/setup.go | 19 +++++++++++- plugin/cache/setup_test.go | 37 ++++++++++++++++++++++ 6 files changed, 166 insertions(+), 15 deletions(-) diff --git a/plugin/cache/README.md b/plugin/cache/README.md index 2958f90c93d..14636e861bb 100644 --- a/plugin/cache/README.md +++ b/plugin/cache/README.md @@ -34,6 +34,7 @@ cache [TTL] [ZONES...] { success CAPACITY [TTL] [MINTTL] denial CAPACITY [TTL] [MINTTL] prefetch AMOUNT [[DURATION] [PERCENTAGE%]] + serve_stale [DURATION] } ~~~ @@ -50,6 +51,10 @@ cache [TTL] [ZONES...] { **DURATION** defaults to 1m. 
Prefetching will happen when the TTL drops below **PERCENTAGE**, which defaults to `10%`, or latest 1 second before TTL expiration. Values should be in the range `[10%, 90%]`. Note the percent sign is mandatory. **PERCENTAGE** is treated as an `int`. +* `serve_stale`, when serve\_stale is set, cache always will serve an expired entry to a client if there is one + available. When this happens, cache will attempt to refresh the cache entry after sending the expired cache + entry to the client. The responses have a TTL of 0. **DURATION** is how far back to consider + stale responses as fresh. The default duration is 1h. ## Capacity and Eviction @@ -69,6 +74,7 @@ If monitoring is enabled (via the *prometheus* plugin) then the following metric * `coredns_cache_hits_total{server, type}` - Counter of cache hits by cache type. * `coredns_cache_misses_total{server}` - Counter of cache misses. * `coredns_cache_drops_total{server}` - Counter of dropped messages. +* `coredns_cache_served_stale_total{server}` - Counter of requests served from stale cache entries. Cache types are either "denial" or "success". `Server` is the server handling the request, see the metrics plugin for documentation. diff --git a/plugin/cache/cache.go b/plugin/cache/cache.go index 43a40c40921..6b50c51cc67 100644 --- a/plugin/cache/cache.go +++ b/plugin/cache/cache.go @@ -36,6 +36,8 @@ type Cache struct { duration time.Duration percentage int + staleUpTo time.Duration + // Testing. 
now func() time.Time } diff --git a/plugin/cache/cache_test.go b/plugin/cache/cache_test.go index b3235337274..138458c8f4c 100644 --- a/plugin/cache/cache_test.go +++ b/plugin/cache/cache_test.go @@ -2,10 +2,12 @@ package cache import ( "context" + "fmt" "testing" "time" "github.com/coredns/coredns/plugin" + "github.com/coredns/coredns/plugin/pkg/dnstest" "github.com/coredns/coredns/plugin/pkg/response" "github.com/coredns/coredns/plugin/test" "github.com/coredns/coredns/request" @@ -233,7 +235,7 @@ func TestCacheZeroTTL(t *testing.T) { c := New() c.minpttl = 0 c.minnttl = 0 - c.Next = zeroTTLBackend() + c.Next = ttlBackend(0) req := new(dns.Msg) req.SetQuestion("example.org.", dns.TypeA) @@ -248,6 +250,52 @@ func TestCacheZeroTTL(t *testing.T) { } } +func TestServeFromStaleCache(t *testing.T) { + c := New() + c.Next = ttlBackend(60) + + req := new(dns.Msg) + req.SetQuestion("cached.org.", dns.TypeA) + ctx := context.TODO() + + // Cache example.org. + rec := dnstest.NewRecorder(&test.ResponseWriter{}) + c.staleUpTo = 1 * time.Hour + c.ServeDNS(ctx, rec, req) + if c.pcache.Len() != 1 { + t.Fatalf("Msg with > 0 TTL should have been cached") + } + + // No more backend resolutions, just from cache if available. + c.Next = plugin.HandlerFunc(func(context.Context, dns.ResponseWriter, *dns.Msg) (int, error) { + return 255, nil // Below, a 255 means we tried querying upstream. 
+ }) + + tests := []struct { + name string + futureMinutes int + expectedResult int + }{ + {"cached.org.", 30, 0}, + {"cached.org.", 60, 0}, + {"cached.org.", 70, 255}, + + {"notcached.org.", 30, 255}, + {"notcached.org.", 60, 255}, + {"notcached.org.", 70, 255}, + } + + for i, tt := range tests { + rec := dnstest.NewRecorder(&test.ResponseWriter{}) + c.now = func() time.Time { return time.Now().Add(time.Duration(tt.futureMinutes) * time.Minute) } + r := req.Copy() + r.SetQuestion(tt.name, dns.TypeA) + if ret, _ := c.ServeDNS(ctx, rec, r); ret != tt.expectedResult { + t.Errorf("Test %d: expecting %v; got %v", i, tt.expectedResult, ret) + } + } +} + func BenchmarkCacheResponse(b *testing.B) { c := New() c.prefetch = 1 @@ -286,13 +334,13 @@ func BackendHandler() plugin.Handler { }) } -func zeroTTLBackend() plugin.Handler { +func ttlBackend(ttl int) plugin.Handler { return plugin.HandlerFunc(func(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (int, error) { m := new(dns.Msg) m.SetReply(r) m.Response, m.RecursionAvailable = true, true - m.Answer = []dns.RR{test.A("example.org. 0 IN A 127.0.0.53")} + m.Answer = []dns.RR{test.A(fmt.Sprintf("example.org. 
%d IN A 127.0.0.53", ttl))} w.WriteMsg(m) return dns.RcodeSuccess, nil }) diff --git a/plugin/cache/handler.go b/plugin/cache/handler.go index 4dc29167ad7..905a98ef491 100644 --- a/plugin/cache/handler.go +++ b/plugin/cache/handler.go @@ -26,19 +26,32 @@ func (c *Cache) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) server := metrics.WithServer(ctx) - i, found := c.get(now, state, server) - if i != nil && found { - resp := i.toMsg(r, now) - w.WriteMsg(resp) - - if c.shouldPrefetch(i, now) { - go c.doPrefetch(ctx, state, server, i, now) - } - return dns.RcodeSuccess, nil + ttl := 0 + i := c.getIgnoreTTL(now, state, server) + if i != nil { + ttl = i.ttl(now) + } + if i == nil || -ttl >= int(c.staleUpTo.Seconds()) { + crr := &ResponseWriter{ResponseWriter: w, Cache: c, state: state, server: server} + return plugin.NextOrFailure(c.Name(), c.Next, ctx, crr, r) } + if ttl < 0 { + servedStale.WithLabelValues(server).Inc() + // Adjust the time to get a 0 TTL in the reply built from a stale item. + now = now.Add(time.Duration(ttl) * time.Second) + go func() { + r := r.Copy() + crr := &ResponseWriter{Cache: c, state: state, server: server, prefetch: true, remoteAddr: w.LocalAddr()} + plugin.NextOrFailure(c.Name(), c.Next, ctx, crr, r) + }() + } + resp := i.toMsg(r, now) + w.WriteMsg(resp) - crr := &ResponseWriter{ResponseWriter: w, Cache: c, state: state, server: server} - return plugin.NextOrFailure(c.Name(), c.Next, ctx, crr, r) + if c.shouldPrefetch(i, now) { + go c.doPrefetch(ctx, state, server, i, now) + } + return dns.RcodeSuccess, nil } func (c *Cache) doPrefetch(ctx context.Context, state request.Request, server string, i *item, now time.Time) { @@ -83,6 +96,27 @@ func (c *Cache) get(now time.Time, state request.Request, server string) (*item, return nil, false } +// getIgnoreTTL unconditionally returns an item if it exists in the cache. 
+func (c *Cache) getIgnoreTTL(now time.Time, state request.Request, server string) *item { + k := hash(state.Name(), state.QType(), state.Do()) + + if i, ok := c.ncache.Get(k); ok { + ttl := i.(*item).ttl(now) + if ttl > 0 || (c.staleUpTo > 0 && -ttl < int(c.staleUpTo.Seconds())) { + cacheHits.WithLabelValues(server, Denial).Inc() + } + return i.(*item) + } + if i, ok := c.pcache.Get(k); ok { + ttl := i.(*item).ttl(now) + if ttl > 0 || (c.staleUpTo > 0 && -ttl < int(c.staleUpTo.Seconds())) { + cacheHits.WithLabelValues(server, Success).Inc() + } + return i.(*item) + } + return nil +} + func (c *Cache) exists(state request.Request) *item { k := hash(state.Name(), state.QType(), state.Do()) if i, ok := c.ncache.Get(k); ok { @@ -129,4 +163,11 @@ var ( Name: "drops_total", Help: "The number responses that are not cached, because the reply is malformed.", }, []string{"server"}) + + servedStale = prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: plugin.Namespace, + Subsystem: "cache", + Name: "served_stale_total", + Help: "The number of requests served from stale cache entries.", + }, []string{"server"}) ) diff --git a/plugin/cache/setup.go b/plugin/cache/setup.go index e4ca494dbfb..62c5c9d2c74 100644 --- a/plugin/cache/setup.go +++ b/plugin/cache/setup.go @@ -1,6 +1,7 @@ package cache import ( + "errors" "fmt" "strconv" "time" @@ -31,7 +32,7 @@ func setup(c *caddy.Controller) error { c.OnStartup(func() error { metrics.MustRegister(c, cacheSize, cacheHits, cacheMisses, - cachePrefetches, cacheDrops) + cachePrefetches, cacheDrops, servedStale) return nil }) @@ -176,6 +177,22 @@ func cacheParse(c *caddy.Controller) (*Cache, error) { ca.percentage = num } + case "serve_stale": + args := c.RemainingArgs() + if len(args) > 1 { + return nil, c.ArgErr() + } + ca.staleUpTo = 1 * time.Hour + if len(args) == 1 { + d, err := time.ParseDuration(args[0]) + if err != nil { + return nil, err + } + if d < 0 { + return nil, errors.New("invalid negative duration for 
serve_stale") + } + ca.staleUpTo = d + } default: return nil, c.ArgErr() } diff --git a/plugin/cache/setup_test.go b/plugin/cache/setup_test.go index 975520d316b..6352bcadbf3 100644 --- a/plugin/cache/setup_test.go +++ b/plugin/cache/setup_test.go @@ -1,6 +1,7 @@ package cache import ( + "fmt" "testing" "time" @@ -113,3 +114,39 @@ func TestSetup(t *testing.T) { } } } + +func TestServeStale(t *testing.T) { + tests := []struct { + input string + shouldErr bool + staleUpTo time.Duration + }{ + {"serve_stale", false, 1 * time.Hour}, + {"serve_stale 20m", false, 20 * time.Minute}, + {"serve_stale 1h20m", false, 80 * time.Minute}, + {"serve_stale 0m", false, 0}, + {"serve_stale 0", false, 0}, + // fails + {"serve_stale 20", true, 0}, + {"serve_stale -20m", true, 0}, + {"serve_stale aa", true, 0}, + {"serve_stale 1m nono", true, 0}, + } + for i, test := range tests { + c := caddy.NewTestController("dns", fmt.Sprintf("cache {\n%s\n}", test.input)) + ca, err := cacheParse(c) + if test.shouldErr && err == nil { + t.Errorf("Test %v: Expected error but found nil", i) + continue + } else if !test.shouldErr && err != nil { + t.Errorf("Test %v: Expected no error but found error: %v", i, err) + continue + } + if test.shouldErr && err != nil { + continue + } + if ca.staleUpTo != test.staleUpTo { + t.Errorf("Test %v: Expected stale %v but found: %v", i, test.staleUpTo, ca.staleUpTo) + } + } +} From 787b6e06f9b902e9983959f200e843e6fd8c7453 Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Fri, 29 Nov 2019 16:22:50 +0000 Subject: [PATCH 285/324] Update governance docs to reflect CODEOWNERS (#3491) NO GOVERNANCE changes made! Point to the correct files, now that I merged CODEOWNERS, OWNERS files are gone and there is also only 1 CODEOWNERS. 
Signed-off-by: Miek Gieben --- CONTRIBUTING.md | 2 +- GOVERNANCE.md | 14 ++++++-------- 2 files changed, 7 insertions(+), 9 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index d3846f664f2..eeb644cfcc0 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -40,7 +40,7 @@ issue first and "claim" it and get feedback before you invest a lot of time. **If someone already opened a pull request, but you think the pull request has stalled and you would like to open another pull request for the same or similar feature, get some of the maintainers (see -[OWNERS](OWNERS)) involved to resolve the situation and move things forward.** +[CODEOWNERS](CODEOWNERS)) involved to resolve the situation and move things forward.** If possible make a pull request as small as possible, or submit multiple pull request to complete a feature. Smaller means: easier to understand and review. This in turn means things can be merged diff --git a/GOVERNANCE.md b/GOVERNANCE.md index 3171557b0ee..36c531b582d 100644 --- a/GOVERNANCE.md +++ b/GOVERNANCE.md @@ -22,8 +22,8 @@ The term of the project lead is one year, with no term limit restriction. The project lead is elected by CoreDNS maintainers according to an individual's technical merit to CoreDNS project. -The current project lead is identified in the top level [OWNERS](OWNERS) file with the string -`project lead` and the term behind the name. +The current project lead is identified in the [CODEOWNERS](CODEOWNERS) file with the string +`project lead` and the term behind the name in a comment at the top of the file. ## Expectations from Maintainers @@ -35,11 +35,9 @@ participate in Pull Request reviews. Maintainers are expected to respond to assi in a *reasonable* time frame, either providing insights, or assign the Pull Requests to other maintainers. 
-Every Maintainer is listed in the top-level [OWNERS](https://github.com/coredns/coredns/blob/master/OWNERS) -file, with their Github handle and a possibly obfuscated email address. Everyone in the -`approvers` list is a Maintainer. - -A Maintainer is also listed in a plugin specific OWNERS file. +Every Maintainer is listed in the +[CODEOWNERS](https://github.com/coredns/coredns/blob/master/CODEOWNERS) +file, with their Github handle. A Maintainer should be a member of `maintainers@coredns.io`, although this is not a hard requirement. @@ -66,7 +64,7 @@ Changes in project lead or term is initiated by opening a github PR. Anyone from CoreDNS community can vote on the PR with either +1 or -1. Only the following votes are binding: -1) Any maintainer that has been listed in the top-level [OWNERS](OWNERS) file before the PR is opened. +1) Any maintainer that has been listed in the [CODEOWNERS](CODEOWNERS) file before the PR is opened. 2) Any maintainer from an organization may cast the vote for that organization. However, no organization should have more binding votes than 1/5 of the total number of maintainers defined in 1). 
From 2503df905638710a171f61900b59d1e64316a306 Mon Sep 17 00:00:00 2001 From: Zou Nengren Date: Sat, 30 Nov 2019 18:33:49 +0800 Subject: [PATCH 286/324] introduce issue and pull request templates (#3493) * introduce issue and pull request templates Signed-off-by: zouyee * introduce issue and pull request templates Signed-off-by: zouyee --- .github/ISSUE_TEMPLATE/bug-report.md | 27 +++++++++++++++++++++++++++ .github/ISSUE_TEMPLATE/enhancement.md | 11 +++++++++++ 2 files changed, 38 insertions(+) create mode 100644 .github/ISSUE_TEMPLATE/bug-report.md create mode 100644 .github/ISSUE_TEMPLATE/enhancement.md diff --git a/.github/ISSUE_TEMPLATE/bug-report.md b/.github/ISSUE_TEMPLATE/bug-report.md new file mode 100644 index 00000000000..414c7f9298f --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug-report.md @@ -0,0 +1,27 @@ +--- +name: Bug Report +about: Report a bug encountered while using CoreDNS +labels: bug + +--- + + + +**What happened**: + +**What you expected to happen**: + +**How to reproduce it (as minimally and precisely as possible)**: + +**Anything else we need to know?**: + +**Environment**: + +- the version of CoreDNS: +- Corefile: +- logs, if applicable: +- OS (e.g: `cat /etc/os-release`): +- Others: diff --git a/.github/ISSUE_TEMPLATE/enhancement.md b/.github/ISSUE_TEMPLATE/enhancement.md new file mode 100644 index 00000000000..d39c37c0109 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/enhancement.md @@ -0,0 +1,11 @@ +--- +name: Enhancement Request +about: Suggest an enhancement to the CoreDNS project +labels: enhancement + +--- + + +**What would you like to be added**: + +**Why is this needed**: From af1fe4172ab34db69be7b141a99ad2771514b534 Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 2 Dec 2019 09:15:51 -0800 Subject: [PATCH 287/324] build(deps): bump github.com/aws/aws-sdk-go from 1.25.41 to 1.25.43 (#3497) Bumps 
[github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.25.41 to 1.25.43. - [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Changelog](https://github.com/aws/aws-sdk-go/blob/master/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.25.41...v1.25.43) Signed-off-by: dependabot-preview[bot] --- go.mod | 2 +- go.sum | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index c5918c17edc..c520956eac8 100644 --- a/go.mod +++ b/go.mod @@ -9,7 +9,7 @@ require ( github.com/Azure/go-autorest/autorest/azure/auth v0.4.0 github.com/DataDog/datadog-go v2.2.0+incompatible // indirect github.com/Shopify/sarama v1.21.0 // indirect - github.com/aws/aws-sdk-go v1.25.41 + github.com/aws/aws-sdk-go v1.25.43 github.com/caddyserver/caddy v1.0.4 github.com/coredns/federation v0.0.0-20190818181423-e032b096babe github.com/coreos/go-systemd v0.0.0-20190212144455-93d5ec2c7f76 // indirect diff --git a/go.sum b/go.sum index 9bec1f3c442..4f07a2a8b40 100644 --- a/go.sum +++ b/go.sum @@ -72,6 +72,8 @@ github.com/aws/aws-sdk-go v1.25.36 h1:4+TL/Y2G5hsR1zdfHmjNG1ou1WEqsSWk8v7m1GaDKy github.com/aws/aws-sdk-go v1.25.36/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.25.41 h1:/hj7nZ0586wFqpwjNpzWiUTwtaMgxAZNZKHay80MdXw= github.com/aws/aws-sdk-go v1.25.41/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.25.43 h1:R5YqHQFIulYVfgRySz9hvBRTWBjudISa+r0C8XQ1ufg= +github.com/aws/aws-sdk-go v1.25.43/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= 
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= From 13fca02316b85168bbc56691aa0251b1df403407 Mon Sep 17 00:00:00 2001 From: Zou Nengren Date: Fri, 6 Dec 2019 18:55:40 +0800 Subject: [PATCH 288/324] use pkg/reuseport in rest plugins (#3492) Automatically submitted. --- plugin/dnstap/dnstapio/io_test.go | 10 ++++++---- plugin/metrics/metrics.go | 3 ++- plugin/pkg/dnstest/server.go | 4 +++- 3 files changed, 11 insertions(+), 6 deletions(-) diff --git a/plugin/dnstap/dnstapio/io_test.go b/plugin/dnstap/dnstapio/io_test.go index dc84cee7752..93bfae88af9 100644 --- a/plugin/dnstap/dnstapio/io_test.go +++ b/plugin/dnstap/dnstapio/io_test.go @@ -6,6 +6,8 @@ import ( "testing" "time" + "github.com/coredns/coredns/plugin/pkg/reuseport" + tap "github.com/dnstap/golang-dnstap" fs "github.com/farsightsec/golang-framestream" ) @@ -55,7 +57,7 @@ func TestTransport(t *testing.T) { for _, param := range transport { // Start TCP listener - l, err := net.Listen(param[0], param[1]) + l, err := reuseport.Listen(param[0], param[1]) if err != nil { t.Fatalf("Cannot start listener: %s", err) } @@ -82,7 +84,7 @@ func TestRace(t *testing.T) { count := 10 // Start TCP listener - l, err := net.Listen("tcp", endpointTCP) + l, err := reuseport.Listen("tcp", endpointTCP) if err != nil { t.Fatalf("Cannot start listener: %s", err) } @@ -115,7 +117,7 @@ func TestReconnect(t *testing.T) { count := 5 // Start TCP listener - l, err := net.Listen("tcp", endpointTCP) + l, err := reuseport.Listen("tcp", endpointTCP) if err != nil { t.Fatalf("Cannot start listener: %s", err) } @@ -141,7 +143,7 @@ func TestReconnect(t *testing.T) { l.Close() // And start TCP listener again on the same port - l, err = net.Listen("tcp", addr) + l, err = reuseport.Listen("tcp", addr) if err != nil { t.Fatalf("Cannot start listener: %s", err) } diff --git a/plugin/metrics/metrics.go b/plugin/metrics/metrics.go index 2b165e5b902..ddadd7acace 100644 --- 
a/plugin/metrics/metrics.go +++ b/plugin/metrics/metrics.go @@ -10,6 +10,7 @@ import ( "github.com/coredns/coredns/plugin" "github.com/coredns/coredns/plugin/metrics/vars" + "github.com/coredns/coredns/plugin/pkg/reuseport" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" @@ -95,7 +96,7 @@ func (m *Metrics) ZoneNames() []string { // OnStartup sets up the metrics on startup. func (m *Metrics) OnStartup() error { - ln, err := net.Listen("tcp", m.Addr) + ln, err := reuseport.Listen("tcp", m.Addr) if err != nil { log.Errorf("Failed to start metrics handler: %s", err) return err diff --git a/plugin/pkg/dnstest/server.go b/plugin/pkg/dnstest/server.go index bbad0f96bfa..94c390623fa 100644 --- a/plugin/pkg/dnstest/server.go +++ b/plugin/pkg/dnstest/server.go @@ -3,6 +3,8 @@ package dnstest import ( "net" + "github.com/coredns/coredns/plugin/pkg/reuseport" + "github.com/miekg/dns" ) @@ -27,7 +29,7 @@ func NewServer(f dns.HandlerFunc) *Server { s2 := &dns.Server{} // tcp for i := 0; i < 5; i++ { // 5 attempts - s2.Listener, _ = net.Listen("tcp", ":0") + s2.Listener, _ = reuseport.Listen("tcp", ":0") if s2.Listener == nil { continue } From 45c43554a8fea6252986bbd50307ed6300894892 Mon Sep 17 00:00:00 2001 From: Zou Nengren Date: Fri, 6 Dec 2019 19:21:34 +0800 Subject: [PATCH 289/324] make Prot to method (#3500) Signed-off-by: zouyee --- request/request.go | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/request/request.go b/request/request.go index 76bb6a7876e..7374b0bd69f 100644 --- a/request/request.go +++ b/request/request.go @@ -111,15 +111,11 @@ func (r *Request) RemoteAddr() string { return r.W.RemoteAddr().String() } func (r *Request) LocalAddr() string { return r.W.LocalAddr().String() } // Proto gets the protocol used as the transport. This will be udp or tcp. 
-func (r *Request) Proto() string { return Proto(r.W) } - -// Proto gets the protocol used as the transport. This will be udp or tcp. -func Proto(w dns.ResponseWriter) string { - // FIXME(miek): why not a method on Request - if _, ok := w.RemoteAddr().(*net.UDPAddr); ok { +func (r *Request) Proto() string { + if _, ok := r.W.RemoteAddr().(*net.UDPAddr); ok { return "udp" } - if _, ok := w.RemoteAddr().(*net.TCPAddr); ok { + if _, ok := r.W.RemoteAddr().(*net.TCPAddr); ok { return "tcp" } return "udp" From 25ef9b2d697fba0499f98314de3d98184eb5394b Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Fri, 6 Dec 2019 15:39:37 +0000 Subject: [PATCH 290/324] Add more paths to CODEOWNERS (#3502) This splits it up some more; meaning less PRs should hit the top-level which lists of a lot owners. Still want to keep that list for important project wide changes Fixes: #3495 Signed-off-by: Miek Gieben --- CODEOWNERS | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/CODEOWNERS b/CODEOWNERS index 67c24cd5df9..520a86ec269 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -2,6 +2,14 @@ * @bradbeam @chrisohaver @dilyevsky @fastest963 @greenpau @grobie @isolus @johnbelamaric @miekg @pmoroney @rajansandeep @stp-ip @superq @yongtang +/plugin/pkg/ @miekg @chrisohaver @johnbelamaric @yongtang @stp-ip +/coremain/ @miekg @chrisohaver @johnbelamaric @yongtang @stp-ip +/core/ @miekg @chrisohaver @johnbelamaric @yongtang @stp-ip +/request/ @miekg @chrisohaver @johnbelamaric @yongtang @stp-ip +/plugin/* @miekg @chrisohaver @johnbelamaric @yongtang @stp-ip +go.sum @miekg @chrisohaver @johnbelamaric @yongtang @stp-ip +go.mod @miekg @chrisohaver @johnbelamaric @yongtang @stp-ip + /plugin/acl/ @miekg @ihac /plugin/any/ @miekg /plugin/auto/ @miekg @stp-ip From 799dce4bffeb6d40b5b15cca47f34d4a7d24e6d3 Mon Sep 17 00:00:00 2001 From: Zou Nengren Date: Sat, 7 Dec 2019 03:04:49 +0800 Subject: [PATCH 291/324] redirect handler for pprof index (#3503) Signed-off-by: zouyee --- plugin/pprof/pprof.go | 3 +++ 1 
file changed, 3 insertions(+) diff --git a/plugin/pprof/pprof.go b/plugin/pprof/pprof.go index ddf5ab2dfbf..822e6e222f3 100644 --- a/plugin/pprof/pprof.go +++ b/plugin/pprof/pprof.go @@ -31,6 +31,9 @@ func (h *handler) Startup() error { h.ln = ln h.mux = http.NewServeMux() + h.mux.HandleFunc(path, func(rw http.ResponseWriter, req *http.Request) { + http.Redirect(rw, req, path+"/", http.StatusFound) + }) h.mux.HandleFunc(path+"/", pp.Index) h.mux.HandleFunc(path+"/cmdline", pp.Cmdline) h.mux.HandleFunc(path+"/profile", pp.Profile) From a53321d9d6bdc80cc4aa88982f401ef2e3267e34 Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Fri, 6 Dec 2019 19:54:31 +0000 Subject: [PATCH 292/324] plugin/sign: fix signing of authoritative data (#3479) Don't sign data we are not authoritative for. This adds an AuthWalk which skips names we should not authoritative for. Adds a few tests to check this is the case. Generates zones have been compared to dnssec-signzone. A number of changes have been made: * don't add DS records to the apex * NSEC TTL is the SOA's minttl value (copying bind9) * Various cleanups * signer struct was cleaned up: doesn't need ttl, nor expiration or inception. * plugin/sign: remove apex stuff from names() This is never used because we will always have other types in the apex, because we *ADD* them ourselves, before we sign (DNSKEY, CDS and CDNSKEY). 
Signed-off-by: Miek Gieben Co-Authored-By: Chris O'Haver --- plugin/file/tree/auth_walk.go | 58 ++++++++++++++++++ plugin/file/tree/walk.go | 11 ++-- plugin/sign/README.md | 27 +++++---- plugin/sign/nsec.go | 23 +++---- plugin/sign/nsec_test.go | 27 +++++++++ plugin/sign/resign_test.go | 2 - plugin/sign/signer.go | 51 ++++++---------- plugin/sign/signer_test.go | 96 +++++++++++++++++++++++++++--- plugin/sign/testdata/db.miek.nl_ns | 10 ++++ 9 files changed, 235 insertions(+), 70 deletions(-) create mode 100644 plugin/file/tree/auth_walk.go create mode 100644 plugin/sign/nsec_test.go create mode 100644 plugin/sign/testdata/db.miek.nl_ns diff --git a/plugin/file/tree/auth_walk.go b/plugin/file/tree/auth_walk.go new file mode 100644 index 00000000000..1f436716fdb --- /dev/null +++ b/plugin/file/tree/auth_walk.go @@ -0,0 +1,58 @@ +package tree + +import ( + "github.com/miekg/dns" +) + +// AuthWalk performs fn on all authoritative values stored in the tree in +// pre-order depth first. If a non-nil error is returned the AuthWalk was interrupted +// by an fn returning that error. If fn alters stored values' sort +// relationships, future tree operation behaviors are undefined. +// +// The fn function will be called with 3 arguments, the current element, a map containing all +// the RRs for this element and a boolean if this name is considered authoritative. +func (t *Tree) AuthWalk(fn func(*Elem, map[uint16][]dns.RR, bool) error) error { + if t.Root == nil { + return nil + } + return t.Root.authwalk(make(map[string]struct{}), fn) +} + +func (n *Node) authwalk(ns map[string]struct{}, fn func(*Elem, map[uint16][]dns.RR, bool) error) error { + if n.Left != nil { + if err := n.Left.authwalk(ns, fn); err != nil { + return err + } + } + + // Check if the current name is a subdomain of *any* of the delegated names we've seen, if so, skip this name. + // The ordering of the tree and how we walk if guarantees we see parents first. 
+ if n.Elem.Type(dns.TypeNS) != nil { + ns[n.Elem.Name()] = struct{}{} + } + + auth := true + i := 0 + for { + j, end := dns.NextLabel(n.Elem.Name(), i) + if end { + break + } + if _, ok := ns[n.Elem.Name()[j:]]; ok { + auth = false + break + } + i++ + } + + if err := fn(n.Elem, n.Elem.m, auth); err != nil { + return err + } + + if n.Right != nil { + if err := n.Right.authwalk(ns, fn); err != nil { + return err + } + } + return nil +} diff --git a/plugin/file/tree/walk.go b/plugin/file/tree/walk.go index 00accbafafc..e315eb03fa7 100644 --- a/plugin/file/tree/walk.go +++ b/plugin/file/tree/walk.go @@ -2,25 +2,28 @@ package tree import "github.com/miekg/dns" -// Walk performs fn on all values stored in the tree. If a non-nil error is returned the -// Walk was interrupted by an fn returning that error. If fn alters stored values' sort +// Walk performs fn on all authoritative values stored in the tree in +// in-order depth first. If a non-nil error is returned the Walk was interrupted +// by an fn returning that error. If fn alters stored values' sort // relationships, future tree operation behaviors are undefined. -func (t *Tree) Walk(fn func(e *Elem, rrs map[uint16][]dns.RR) error) error { +func (t *Tree) Walk(fn func(*Elem, map[uint16][]dns.RR) error) error { if t.Root == nil { return nil } return t.Root.walk(fn) } -func (n *Node) walk(fn func(e *Elem, rrs map[uint16][]dns.RR) error) error { +func (n *Node) walk(fn func(*Elem, map[uint16][]dns.RR) error) error { if n.Left != nil { if err := n.Left.walk(fn); err != nil { return err } } + if err := fn(n.Elem, n.Elem.m); err != nil { return err } + if n.Right != nil { if err := n.Right.walk(fn); err != nil { return err diff --git a/plugin/sign/README.md b/plugin/sign/README.md index d2782816ae8..90d687e59f2 100644 --- a/plugin/sign/README.md +++ b/plugin/sign/README.md @@ -9,8 +9,7 @@ The *sign* plugin is used to sign (see RFC 6781) zones. In this process DNSSEC resource records are added. 
The signatures that sign the resource records sets have an expiration date, this means the signing process must be repeated before this expiration data is reached. Otherwise the zone's data -will go BAD (RFC 4035, Section 5.5). The *sign* plugin takes care of this. *Sign* works, but has -a couple of limitations, see the "Bugs" section. +will go BAD (RFC 4035, Section 5.5). The *sign* plugin takes care of this. Only NSEC is supported, *sign* does not support NSEC3. @@ -32,17 +31,21 @@ it do key or algorithm rollovers - it just signs. Both these dates are only checked on the SOA's signature(s). - * Create signatures that have an inception of -3 hours (minus a jitter between 0 and 18 hours) + * Create RRSIGs that have an inception of -3 hours (minus a jitter between 0 and 18 hours) and a expiration of +32 days for every given DNSKEY. + * Add NSEC records for all names in the zone. The TTL for these is the negative cache TTL from the + SOA record. + * Add or replace *all* apex CDS/CDNSKEY records with the ones derived from the given keys. For each key two CDS are created one with SHA1 and another with SHA256. * Update the SOA's serial number to the *Unix epoch* of when the signing happens. This will overwrite *any* previous serial number. -Thus there are two ways that dictate when a zone is signed. Normally every 6 days (plus jitter) it -will be resigned. If for some reason we fail this check, the 14 days before expiring kicks in. + +There are two ways that dictate when a zone is signed. Normally every 6 days (plus jitter) it will +be resigned. If for some reason we fail this check, the 14 days before expiring kicks in. Keys are named (following BIND9): `K++.key` and `K++.private`. The keys **must not** be included in your zone; they will be added by *sign*. 
These keys can be @@ -144,8 +147,8 @@ example.org example.net { This will lead to `db.example.org` be signed *twice*, as this entire section is parsed twice because you have specified the origins `example.org` and `example.net` in the server block. -Forcibly resigning a zone can be accomplished by removing the signed zone file (CoreDNS will keep on -serving it from memory), and sending SIGUSR1 to the process to make it reload and resign the zone +Forcibly resigning a zone can be accomplished by removing the signed zone file (CoreDNS will keep +on serving it from memory), and sending SIGUSR1 to the process to make it reload and resign the zone file. ## Also See @@ -153,9 +156,13 @@ file. The DNSSEC RFCs: RFC 4033, RFC 4034 and RFC 4035. And the BCP on DNSSEC, RFC 6781. Further more the manual pages coredns-keygen(1) and dnssec-keygen(8). And the *file* plugin's documentation. -Coredns-keygen can be found at in the coredns-keygen directory. +Coredns-keygen can be found at +[https://github.com/coredns/coredns-utils](https://github.com/coredns/coredns-utils) in the +coredns-keygen directory. + +Other useful DNSSEC tools can be found in [ldns](https://nlnetlabs.nl/projects/ldns/about/), e.g. +`ldns-key2ds` to create DS records from DNSKEYs. ## Bugs -`keys directory` is not implemented. Glue records are currently signed, and no DS records are added -for child zones. +`keys directory` is not implemented. diff --git a/plugin/sign/nsec.go b/plugin/sign/nsec.go index d726ade72d1..d7c6a30a361 100644 --- a/plugin/sign/nsec.go +++ b/plugin/sign/nsec.go @@ -9,24 +9,19 @@ import ( "github.com/miekg/dns" ) -// names returns the elements of the zone in nsec order. If the returned boolean is true there were -// no other apex records than SOA and NS, which are stored separately. -func names(origin string, z *file.Zone) ([]string, bool) { - // if there are no apex records other than NS and SOA we'll miss the origin - // in this list. 
Check the first element and if not origin prepend it. +// names returns the elements of the zone in nsec order. +func names(origin string, z *file.Zone) []string { + // There will also be apex records other than NS and SOA (who are kept separate), as we + // are adding DNSKEY and CDS/CDNSKEY records in the apex *before* we sign. n := []string{} - z.Walk(func(e *tree.Elem, _ map[uint16][]dns.RR) error { + z.AuthWalk(func(e *tree.Elem, _ map[uint16][]dns.RR, auth bool) error { + if !auth { + return nil + } n = append(n, e.Name()) return nil }) - if len(n) == 0 { - return nil, false - } - if n[0] != origin { - n = append([]string{origin}, n...) - return n, true - } - return n, false + return n } // NSEC returns an NSEC record according to name, next, ttl and bitmap. Note that the bitmap is sorted before use. diff --git a/plugin/sign/nsec_test.go b/plugin/sign/nsec_test.go new file mode 100644 index 00000000000..f272651fc6a --- /dev/null +++ b/plugin/sign/nsec_test.go @@ -0,0 +1,27 @@ +package sign + +import ( + "os" + "testing" + + "github.com/coredns/coredns/plugin/file" +) + +func TestNames(t *testing.T) { + f, err := os.Open("testdata/db.miek.nl_ns") + if err != nil { + t.Error(err) + } + z, err := file.Parse(f, "db.miek.nl_ns", "miek.nl", 0) + if err != nil { + t.Error(err) + } + + names := names("miek.nl.", z) + expected := []string{"miek.nl.", "child.miek.nl.", "www.miek.nl."} + for i := range names { + if names[i] != expected[i] { + t.Errorf("Expected %s, got %s", expected[i], names[i]) + } + } +} diff --git a/plugin/sign/resign_test.go b/plugin/sign/resign_test.go index e2571cb5b6d..2f67f52aa78 100644 --- a/plugin/sign/resign_test.go +++ b/plugin/sign/resign_test.go @@ -13,7 +13,6 @@ func TestResignInception(t *testing.T) { if x := resign(zr, then); x != nil { t.Errorf("Expected RRSIG to be valid for %s, got invalid: %s", then.Format(timeFmt), x) } - // inception starts after this date. zr = strings.NewReader(`miek.nl. 
1800 IN RRSIG SOA 13 2 1800 20190808191936 20190731161936 59725 miek.nl. eU6gI1OkSEbyt`) if x := resign(zr, then); x == nil { @@ -33,7 +32,6 @@ func TestResignExpire(t *testing.T) { if x := resign(zr, then); x != nil { t.Errorf("Expected RRSIG to be valid for %s, got invalid: %s", then.Format(timeFmt), x) } - // expired yesterday zr = strings.NewReader(`miek.nl. 1800 IN RRSIG SOA 13 2 1800 20190721191936 20190717161936 59725 miek.nl. eU6gI1OkSEbyt`) if x := resign(zr, then); x == nil { diff --git a/plugin/sign/signer.go b/plugin/sign/signer.go index a9a5f2e5aed..b0e2f02fef4 100644 --- a/plugin/sign/signer.go +++ b/plugin/sign/signer.go @@ -26,10 +26,6 @@ type Signer struct { signedfile string stop chan struct{} - - expiration uint32 - inception uint32 - ttl uint32 } // Sign signs a zone file according to the parameters in s. @@ -44,46 +40,31 @@ func (s *Signer) Sign(now time.Time) (*file.Zone, error) { return nil, err } - s.inception, s.expiration = lifetime(now, s.jitter) - - s.ttl = z.Apex.SOA.Header().Ttl + mttl := z.Apex.SOA.Minttl + ttl := z.Apex.SOA.Header().Ttl + inception, expiration := lifetime(now, s.jitter) z.Apex.SOA.Serial = uint32(now.Unix()) for _, pair := range s.keys { - pair.Public.Header().Ttl = s.ttl // set TTL on key so it matches the RRSIG. + pair.Public.Header().Ttl = ttl // set TTL on key so it matches the RRSIG. 
z.Insert(pair.Public) - z.Insert(pair.Public.ToDS(dns.SHA1)) - z.Insert(pair.Public.ToDS(dns.SHA256)) z.Insert(pair.Public.ToDS(dns.SHA1).ToCDS()) z.Insert(pair.Public.ToDS(dns.SHA256).ToCDS()) z.Insert(pair.Public.ToCDNSKEY()) } - names, apex := names(s.origin, z) + names := names(s.origin, z) ln := len(names) - var nsec *dns.NSEC - if apex { - nsec = NSEC(s.origin, names[(ln+1)%ln], s.ttl, []uint16{dns.TypeSOA, dns.TypeNS, dns.TypeRRSIG, dns.TypeNSEC}) - z.Insert(nsec) - } - for _, pair := range s.keys { - rrsig, err := pair.signRRs([]dns.RR{z.Apex.SOA}, s.origin, s.ttl, s.inception, s.expiration) + rrsig, err := pair.signRRs([]dns.RR{z.Apex.SOA}, s.origin, ttl, inception, expiration) if err != nil { return nil, err } z.Insert(rrsig) // NS apex may not be set if RR's have been discarded because the origin doesn't match. if len(z.Apex.NS) > 0 { - rrsig, err = pair.signRRs(z.Apex.NS, s.origin, s.ttl, s.inception, s.expiration) - if err != nil { - return nil, err - } - z.Insert(rrsig) - } - if apex { - rrsig, err = pair.signRRs([]dns.RR{nsec}, s.origin, s.ttl, s.inception, s.expiration) + rrsig, err = pair.signRRs(z.Apex.NS, s.origin, ttl, inception, expiration) if err != nil { return nil, err } @@ -93,21 +74,27 @@ func (s *Signer) Sign(now time.Time) (*file.Zone, error) { // We are walking the tree in the same direction, so names[] can be used here to indicated the next element. 
i := 1 -	err = z.Walk(func(e *tree.Elem, zrrs map[uint16][]dns.RR) error { -		if !apex && e.Name() == s.origin { -			nsec := NSEC(e.Name(), names[(ln+i)%ln], s.ttl, append(e.Types(), dns.TypeNS, dns.TypeSOA, dns.TypeNSEC, dns.TypeRRSIG)) +	err = z.AuthWalk(func(e *tree.Elem, zrrs map[uint16][]dns.RR, auth bool) error { +		if !auth { +			return nil +		} + +		if e.Name() == s.origin { +			nsec := NSEC(e.Name(), names[(ln+i)%ln], mttl, append(e.Types(), dns.TypeNS, dns.TypeSOA, dns.TypeRRSIG, dns.TypeNSEC)) z.Insert(nsec) } else { -			nsec := NSEC(e.Name(), names[(ln+i)%ln], s.ttl, append(e.Types(), dns.TypeNSEC, dns.TypeRRSIG)) +			nsec := NSEC(e.Name(), names[(ln+i)%ln], mttl, append(e.Types(), dns.TypeRRSIG, dns.TypeNSEC)) z.Insert(nsec) } for t, rrs := range zrrs { -			if t == dns.TypeRRSIG { +			// RRSIGs are not signed and NS records are not signed because we are never authoritative for them. +			// The zone's apex nameserver records are not kept in this tree and are signed separately. +			if t == dns.TypeRRSIG || t == dns.TypeNS { continue } for _, pair := range s.keys { -				rrsig, err := pair.signRRs(rrs, s.origin, s.ttl, s.inception, s.expiration) +				rrsig, err := pair.signRRs(rrs, s.origin, rrs[0].Header().Ttl, inception, expiration) if err != nil { return err } diff --git a/plugin/sign/signer_test.go b/plugin/sign/signer_test.go index 6ba94247d41..bb364728ec8 100644 --- a/plugin/sign/signer_test.go +++ b/plugin/sign/signer_test.go @@ -1,6 +1,7 @@ package sign import ( + "bytes" "io/ioutil" "os" "testing" @@ -29,8 +30,8 @@ func TestSign(t *testing.T) { } apex, _ := z.Search("miek.nl.") - if x := apex.Type(dns.TypeDS); len(x) != 2 { - t.Errorf("Expected %d DS records, got %d", 2, len(x)) + if x := apex.Type(dns.TypeDS); len(x) != 0 { + t.Errorf("Expected %d DS records, got %d", 0, len(x)) } if x := apex.Type(dns.TypeCDS); len(x) != 2 { t.Errorf("Expected %d CDS records, got %d", 2, len(x)) @@ -75,14 +76,14 @@ $ORIGIN example.org. 
if x := nsec[0].(*dns.NSEC).NextDomain; x != "example.org." { t.Errorf("Expected NSEC NextDomain %s, got %s", "example.org.", x) } - if x := nsec[0].(*dns.NSEC).TypeBitMap; len(x) != 8 { - t.Errorf("Expected NSEC bitmap to be %d elements, got %d", 8, x) + if x := nsec[0].(*dns.NSEC).TypeBitMap; len(x) != 7 { + t.Errorf("Expected NSEC bitmap to be %d elements, got %d", 7, x) } - if x := nsec[0].(*dns.NSEC).TypeBitMap; x[7] != dns.TypeCDNSKEY { - t.Errorf("Expected NSEC bitmap element 6 to be %d, got %d", dns.TypeCDNSKEY, x[7]) + if x := nsec[0].(*dns.NSEC).TypeBitMap; x[6] != dns.TypeCDNSKEY { + t.Errorf("Expected NSEC bitmap element 5 to be %d, got %d", dns.TypeCDNSKEY, x[6]) } - if x := nsec[0].(*dns.NSEC).TypeBitMap; x[5] != dns.TypeDNSKEY { - t.Errorf("Expected NSEC bitmap element 5 to be %d, got %d", dns.TypeDNSKEY, x[5]) + if x := nsec[0].(*dns.NSEC).TypeBitMap; x[4] != dns.TypeDNSKEY { + t.Errorf("Expected NSEC bitmap element 4 to be %d, got %d", dns.TypeDNSKEY, x[4]) } dnskey := el.Type(dns.TypeDNSKEY) if x := dnskey[0].Header().Ttl; x != 1800 { @@ -100,3 +101,82 @@ $ORIGIN example.org. 
} } } + +func TestSignGlue(t *testing.T) { + input := `sign testdata/db.miek.nl miek.nl { + key file testdata/Kmiek.nl.+013+59725 + directory testdata + }` + c := caddy.NewTestController("dns", input) + sign, err := parse(c) + if err != nil { + t.Fatal(err) + } + if len(sign.signers) != 1 { + t.Fatalf("Expected 1 signer, got %d", len(sign.signers)) + } + z, err := sign.signers[0].Sign(time.Now().UTC()) + if err != nil { + t.Error(err) + } + + e, _ := z.Search("ns2.bla.miek.nl.") + sigs := e.Type(dns.TypeRRSIG) + if len(sigs) != 0 { + t.Errorf("Expected no RRSIG for %s, got %d", "ns2.bla.miek.nl.", len(sigs)) + } +} + +func TestSignDS(t *testing.T) { + input := `sign testdata/db.miek.nl_ns miek.nl { + key file testdata/Kmiek.nl.+013+59725 + directory testdata + }` + c := caddy.NewTestController("dns", input) + sign, err := parse(c) + if err != nil { + t.Fatal(err) + } + if len(sign.signers) != 1 { + t.Fatalf("Expected 1 signer, got %d", len(sign.signers)) + } + z, err := sign.signers[0].Sign(time.Now().UTC()) + if err != nil { + t.Error(err) + } + + // dnssec-signzone outputs this for db.miek.nl_ns: + // + // child.miek.nl. 1800 IN NS ns.child.miek.nl. + // child.miek.nl. 1800 IN DS 34385 13 2 fc7397c77afbccb6742fc.... + // child.miek.nl. 1800 IN RRSIG DS 13 3 1800 20191223121229 20191123121229 59725 miek.nl. ZwptLzVVs.... + // child.miek.nl. 14400 IN NSEC www.miek.nl. NS DS RRSIG NSEC + // child.miek.nl. 14400 IN RRSIG NSEC 13 3 14400 20191223121229 20191123121229 59725 miek.nl. w+CcA8... + + name := "child.miek.nl." 
+ e, _ := z.Search(name) + if x := len(e.Types()); x != 4 { // NS DS NSEC and 2x RRSIG + t.Errorf("Expected 4 records for %s, got %d", name, x) + } + + ds := e.Type(dns.TypeDS) + if len(ds) != 1 { + t.Errorf("Expected DS for %s, got %d", name, len(ds)) + } + sigs := e.Type(dns.TypeRRSIG) + if len(sigs) != 2 { + t.Errorf("Expected no RRSIG for %s, got %d", name, len(sigs)) + } + nsec := e.Type(dns.TypeNSEC) + if x := nsec[0].(*dns.NSEC).NextDomain; x != "www.miek.nl." { + t.Errorf("Expected no NSEC NextDomain to be %s for %s, got %s", "www.miek.nl.", name, x) + } + minttl := z.Apex.SOA.Minttl + if x := nsec[0].Header().Ttl; x != minttl { + t.Errorf("Expected no NSEC TTL to be %d for %s, got %d", minttl, "www.miek.nl.", x) + } + // print zone on error + buf := &bytes.Buffer{} + write(buf, z) + t.Logf("%s\n", buf) +} diff --git a/plugin/sign/testdata/db.miek.nl_ns b/plugin/sign/testdata/db.miek.nl_ns new file mode 100644 index 00000000000..bd2371f1600 --- /dev/null +++ b/plugin/sign/testdata/db.miek.nl_ns @@ -0,0 +1,10 @@ +$TTL 30M +$ORIGIN miek.nl. +@ IN SOA linode.atoom.net. miek.miek.nl. ( 1282630060 4H 1H 7D 4H ) + NS linode.atoom.net. + DNSKEY 257 3 13 sfzRg5nDVxbeUc51su4MzjgwpOpUwnuu81SlRHqJuXe3SOYOeypR69tZ52XLmE56TAmPHsiB8Rgk+NTpf0o1Cw== + +www AAAA ::1 +child NS ns.child +ns.child AAAA ::1 +child DS 34385 13 2 fc7397c77afbccb6742fcff19c7b1410d0044661e7085fc200ae1ab3d15a5842 From ecd5c0997ddb13e87182563d3d15f645ef4c0fb8 Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Sat, 7 Dec 2019 08:36:43 +0000 Subject: [PATCH 293/324] plugin.cfg: go gen -> go generate (#3511) This takes over #3476 because that is a good change but lacks the DCO and the author seems unresponsive. Reformat the file a bit more while we're at it. 
Closes: #3476 Signed-off-by: Miek Gieben --- plugin.cfg | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/plugin.cfg b/plugin.cfg index d40cf7a33cd..3f3dd85fd67 100644 --- a/plugin.cfg +++ b/plugin.cfg @@ -1,20 +1,20 @@ -# Directives are registered in the order they should be -# executed. +# Directives are registered in the order they should be executed. # -# Ordering is VERY important. Every plugin will -# feel the effects of all other plugin below -# (after) them during a request, but they must not -# care what plugin above them are doing. +# Ordering is VERY important. Every plugin will feel the effects of all other +# plugin below (after) them during a request, but they must not care what plugin +# above them are doing. -# How to rebuild with updated plugin configurations: -# Modify the list below and run `go gen && go build` +# How to rebuild with updated plugin configurations: Modify the list below and +# run `go generate && go build` -# The parser takes the input format of +# The parser takes the input format of: +# # : # Or # : # # External plugin example: +# # log:github.com/coredns/coredns/plugin/log # Local plugin example: # log:log From 6cac0de83ab64a9076510b792543592a0b69f501 Mon Sep 17 00:00:00 2001 From: Miek Gieben Date: Sat, 7 Dec 2019 12:04:14 +0000 Subject: [PATCH 294/324] Update github templates (#3510) An ISSUE_TEMPLATE directory was added, meaning the ISSUE_TEMPLATE.md is now obsolete. Remove stale.yml because not used. Symlink CONTRIBUTING.md the other way around to get GitHub recognize it. 
Signed-off-by: Miek Gieben --- .github/CONTRIBUTING.md | 82 ++++++++++++++++++++++++++++++++++++++- .github/ISSUE_TEMPLATE.md | 8 ---- .github/stale.yml | 19 --------- CONTRIBUTING.md | 82 +-------------------------------------- 4 files changed, 82 insertions(+), 109 deletions(-) mode change 120000 => 100644 .github/CONTRIBUTING.md delete mode 100644 .github/ISSUE_TEMPLATE.md delete mode 100644 .github/stale.yml mode change 100644 => 120000 CONTRIBUTING.md diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md deleted file mode 120000 index 44fcc634393..00000000000 --- a/.github/CONTRIBUTING.md +++ /dev/null @@ -1 +0,0 @@ -../CONTRIBUTING.md \ No newline at end of file diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md new file mode 100644 index 00000000000..eeb644cfcc0 --- /dev/null +++ b/.github/CONTRIBUTING.md @@ -0,0 +1,81 @@ +# Contributing to CoreDNS + +Welcome! Our community focuses on helping others and making CoreDNS the best it can be. We gladly +accept contributions and encourage you to get involved! + +## Bug Reports + +First, please [search this +repository](https://github.com/coredns/coredns/search?q=&type=Issues&utf8=%E2%9C%93) with a variety +of keywords to ensure your bug is not already reported. + +If not, [open an issue](https://github.com/coredns/coredns/issues) and answer the questions so we +can understand and reproduce the problematic behavior. + +The burden is on you to convince us that it is actually a bug in CoreDNS. This is easiest to do when +you write clear, concise instructions so we can reproduce the behavior (even if it seems obvious). +The more detailed and specific you are, the faster we will be able to help you. Check out [How to +Report Bugs Effectively](https://www.chiark.greenend.org.uk/~sgtatham/bugs.html). + +Please be kind. :smile: Remember that CoreDNS comes at no cost to you, and you're getting free help. 
+ +## Minor Improvements and New Tests + +Submit [pull requests](https://github.com/coredns/coredns/pulls) at any time. Make sure to write +tests to assert your change is working properly and is thoroughly covered. + +## New Features + +First, please [search](https://github.com/coredns/coredns/search?q=&type=Issues&utf8=%E2%9C%93) with +a variety of keywords to ensure your suggestion/proposal is new. + +Please also check for existing pull requests to see if someone is already working on this. We want +to avoid duplication of effort. + +If the proposal is new and no one has opened pull request yet, you may open either an issue or a +pull request for discussion and feedback. + +If you are going to spend significant time implementing code for a pull request, best to open an +issue first and "claim" it and get feedback before you invest a lot of time. + +**If someone already opened a pull request, but you think the pull request has stalled and you would +like to open another pull request for the same or similar feature, get some of the maintainers (see +[CODEOWNERS](CODEOWNERS)) involved to resolve the situation and move things forward.** + +If possible make a pull request as small as possible, or submit multiple pull request to complete a +feature. Smaller means: easier to understand and review. This in turn means things can be merged +faster. + +## New Plugins + +A new plugin is (usually) about 1000 lines of Go. This includes tests and some plugin boiler plate. +This is a considerable amount of code and will take time to review. To prevent too much back and +forth it is advisable to start with the plugin's `README.md`; This will be its main documentation +and will help nail down the correct name of the plugin and its various config options. + +From there it can work its way through the rest (`setup.go`, the `ServeDNS` handler function, etc.). +Doing this will help the reviewers, as each chunk of code is relatively small. 
+ +Also read [plugin.md](https://raw.githubusercontent.com/coredns/coredns/master/plugin.md) for +advice on how to write a plugin. + +## Updating Dependencies + +We use [Go Modules](https://github.com/golang/go/wiki/Modules) as the tool to manage vendor dependencies. + +Use the following to update the version of all dependencies +```sh +$ go get -u +``` + +After the dependencies have been updated or added, you might run the following to +cleanup the go module files: +```sh +$ go mod tidy +``` + +Please refer to [Go Modules](https://github.com/golang/go/wiki/Modules) for more details. + +# Thank You + +Thanks for your help! CoreDNS would not be what it is today without your contributions. diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md deleted file mode 100644 index a02762671e1..00000000000 --- a/.github/ISSUE_TEMPLATE.md +++ /dev/null @@ -1,8 +0,0 @@ - diff --git a/.github/stale.yml b/.github/stale.yml deleted file mode 100644 index ebcc73a54a6..00000000000 --- a/.github/stale.yml +++ /dev/null @@ -1,19 +0,0 @@ -# Number of days of inactivity before an issue becomes stale -daysUntilStale: 100 -# Number of days of inactivity before a stale issue is closed -daysUntilClose: 21 -# Issues with these labels will never be considered stale -exemptLabels: - - pinned - - security - - later - - bug -# Label to use when marking an issue as stale -staleLabel: wontfix-stalebot -# Comment to post when marking an issue as stale. Set to `false` to disable -markComment: > - This issue has been automatically marked as stale because it has not had - recent activity. It will be closed if no further activity occurs. Thank you - for your contributions. -# Comment to post when closing a stale issue. Set to `false` to disable -closeComment: false diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md deleted file mode 100644 index eeb644cfcc0..00000000000 --- a/CONTRIBUTING.md +++ /dev/null @@ -1,81 +0,0 @@ -# Contributing to CoreDNS - -Welcome! 
Our community focuses on helping others and making CoreDNS the best it can be. We gladly -accept contributions and encourage you to get involved! - -## Bug Reports - -First, please [search this -repository](https://github.com/coredns/coredns/search?q=&type=Issues&utf8=%E2%9C%93) with a variety -of keywords to ensure your bug is not already reported. - -If not, [open an issue](https://github.com/coredns/coredns/issues) and answer the questions so we -can understand and reproduce the problematic behavior. - -The burden is on you to convince us that it is actually a bug in CoreDNS. This is easiest to do when -you write clear, concise instructions so we can reproduce the behavior (even if it seems obvious). -The more detailed and specific you are, the faster we will be able to help you. Check out [How to -Report Bugs Effectively](https://www.chiark.greenend.org.uk/~sgtatham/bugs.html). - -Please be kind. :smile: Remember that CoreDNS comes at no cost to you, and you're getting free help. - -## Minor Improvements and New Tests - -Submit [pull requests](https://github.com/coredns/coredns/pulls) at any time. Make sure to write -tests to assert your change is working properly and is thoroughly covered. - -## New Features - -First, please [search](https://github.com/coredns/coredns/search?q=&type=Issues&utf8=%E2%9C%93) with -a variety of keywords to ensure your suggestion/proposal is new. - -Please also check for existing pull requests to see if someone is already working on this. We want -to avoid duplication of effort. - -If the proposal is new and no one has opened pull request yet, you may open either an issue or a -pull request for discussion and feedback. - -If you are going to spend significant time implementing code for a pull request, best to open an -issue first and "claim" it and get feedback before you invest a lot of time. 
- -**If someone already opened a pull request, but you think the pull request has stalled and you would -like to open another pull request for the same or similar feature, get some of the maintainers (see -[CODEOWNERS](CODEOWNERS)) involved to resolve the situation and move things forward.** - -If possible make a pull request as small as possible, or submit multiple pull request to complete a -feature. Smaller means: easier to understand and review. This in turn means things can be merged -faster. - -## New Plugins - -A new plugin is (usually) about 1000 lines of Go. This includes tests and some plugin boiler plate. -This is a considerable amount of code and will take time to review. To prevent too much back and -forth it is advisable to start with the plugin's `README.md`; This will be its main documentation -and will help nail down the correct name of the plugin and its various config options. - -From there it can work its way through the rest (`setup.go`, the `ServeDNS` handler function, etc.). -Doing this will help the reviewers, as each chunk of code is relatively small. - -Also read [plugin.md](https://raw.githubusercontent.com/coredns/coredns/master/plugin.md) for -advice on how to write a plugin. - -## Updating Dependencies - -We use [Go Modules](https://github.com/golang/go/wiki/Modules) as the tool to manage vendor dependencies. - -Use the following to update the version of all dependencies -```sh -$ go get -u -``` - -After the dependencies have been updated or added, you might run the following to -cleanup the go module files: -```sh -$ go mod tidy -``` - -Please refer to [Go Modules](https://github.com/golang/go/wiki/Modules) for more details. - -# Thank You - -Thanks for your help! CoreDNS would not be what it is today without your contributions. 
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 120000 index 00000000000..784ef485bfc --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1 @@ +.github/CONTRIBUTING.md \ No newline at end of file From bb077c0e49548c88eb94d9481b305f8af90f787d Mon Sep 17 00:00:00 2001 From: Yong Tang Date: Sat, 7 Dec 2019 07:45:27 -0800 Subject: [PATCH 295/324] Update GitHub template for `CODE-OF-CONDUCT.md` => `.github/CODE_OF_CONDUCT.md` (#3514) This PR updates GitHub template for `CODE-OF-CONDUCT.md => `.github/CODE_OF_CONDUCT.md`, to take advantage of GitHub feature in: https://help.github.com/en/github/building-a-strong-community/adding-a-code-of-conduct-to-your-project This PR also moves `SECURITY.md` to `.github/SECURITY.md` and alias it for consistency. Signed-off-by: Yong Tang --- .../CODE_OF_CONDUCT.md | 0 .github/SECURITY.md | 189 +++++++++++++++++ CODE_OF_CONDUCT.md | 1 + SECURITY.md | 190 +----------------- 4 files changed, 191 insertions(+), 189 deletions(-) rename CODE-OF-CONDUCT.md => .github/CODE_OF_CONDUCT.md (100%) create mode 100644 .github/SECURITY.md create mode 120000 CODE_OF_CONDUCT.md mode change 100644 => 120000 SECURITY.md diff --git a/CODE-OF-CONDUCT.md b/.github/CODE_OF_CONDUCT.md similarity index 100% rename from CODE-OF-CONDUCT.md rename to .github/CODE_OF_CONDUCT.md diff --git a/.github/SECURITY.md b/.github/SECURITY.md new file mode 100644 index 00000000000..f9e317ee17f --- /dev/null +++ b/.github/SECURITY.md @@ -0,0 +1,189 @@ +# Security Release Process + +The CoreDNS project has adopted this security disclosures and response policy +to ensure responsible handling of critical issues. + + +## Product Security Team (PST) + +Security vulnerabilities should be handled quickly and sometimes privately. +The primary goal of this process is to reduce the total time users are vulnerable to publicly known exploits. 
+ +The Product Security Team (PST) is responsible for organizing the entire response including internal communication and external disclosure. + +The initial Product Security Team will consist of the set of maintainers that volunteered. + +### mailing lists + +* security@coredns.io : for any security concerns. Received by Product Security Team members, and used by this Team to discuss security issues and fixes. +* coredns-distributors-announce@lists.cncf.io: for early private information on Security patch releases. see below how CoreDNS distributors can apply for this list. + + +## Disclosures + +### Private Disclosure Processes + +If you find a security vulnerability or any security related issues, +please DO NOT file a public issue. Do not create a Github issue. +Instead, send your report privately to security@coredns.io. +Security reports are greatly appreciated and we will publicly thank you for it. + +Please provide as much information as possible, so we can react quickly. +For instance, that could include: +- Description of the location and potential impact of the vulnerability; +- A detailed description of the steps required to reproduce the vulnerability (POC scripts, screenshots, and compressed packet captures are all helpful to us) +- Whatever else you think we might need to identify the source of this vulnerability + +### Public Disclosure Processes + +If you know of a publicly disclosed security vulnerability please IMMEDIATELY email security@coredns.io +to inform the Product Security Team (PST) about the vulnerability so we start the patch, release, and communication process. + +If possible the PST will ask the person making the public report if the issue can be handled via a private disclosure process +(for example if the full exploit details have not yet been published). +If the reporter denies the request for private disclosure, the PST will move swiftly with the fix and release process. 
+In extreme cases you can ask GitHub to delete the issue but this generally isn't necessary and is unlikely to make a public disclosure less damaging. + +## Patch, Release, and Public Communication + +For each vulnerability a member of the PST will volunteer to lead coordination with the "Fix Team" +and is responsible for sending disclosure emails to the rest of the community. +This lead will be referred to as the "Fix Lead." + +The role of Fix Lead should rotate round-robin across the PST. + +Note that given the current size of the CoreDNS community it is likely that the PST is the same as the "Fix team." +The PST may decide to bring in additional contributors for added expertise depending on the area of the code that contains the vulnerability. + +All of the timelines below are suggestions and assume a Private Disclosure. +If the Team is dealing with a Public Disclosure all timelines become ASAP. +If the fix relies on another upstream project's disclosure timeline, that will adjust the process as well. +We will work with the upstream project to fit their timeline and best protect our users. + +### Fix Team Organization + +These steps should be completed within the first 24 hours of disclosure. + +- The Fix Lead will work quickly to identify relevant engineers from the affected projects and + packages and CC those engineers into the disclosure thread. These selected developers are the Fix + Team. +- The Fix Lead will get the Fix Team access to private security repos to develop the fix. + + +### Fix Development Process + +These steps should be completed within the 1-7 days of Disclosure. + +- The Fix Lead and the Fix Team will create a + [CVSS](https://www.first.org/cvss/specification-document) using the [CVSS + Calculator](https://www.first.org/cvss/calculator/3.0). The Fix Lead makes the final call on the + calculated CVSS; it is better to move quickly than making the CVSS perfect. 
+- The Fix Team will notify the Fix Lead that work on the fix branch is complete once there are LGTMs + on all commits in the private repo from one or more maintainers. + +If the CVSS score is under 4.0 ([a low severity +score](https://www.first.org/cvss/specification-document#i5)) the Fix Team can decide to slow the +release process down in the face of holidays, developer bandwidth, etc. These decisions must be +discussed on the security@coredns.io mailing list. + +### Fix Disclosure Process + +With the Fix Development underway the CoreDNS Security Team needs to come up with an overall communication plan for the wider community. +This Disclosure process should begin after the Team has developed a fix or mitigation +so that a realistic timeline can be communicated to users. + +**Disclosure of Forthcoming Fix to Users** (Completed within 1-7 days of Disclosure) + +- The Fix Lead will create a github issue in CoreDNS project to inform users that a security vulnerability +has been disclosed and that a fix will be made available, with an estimation of the Release Date. +It will include any mitigating steps users can take until a fix is available. + +The communication to users should be actionable. +They should know when to block time to apply patches, understand exact mitigation steps, etc. + +**Optional Fix Disclosure to Private Distributors List** (Completed within 1-14 days of Disclosure): + +- The Fix Lead will make a determination with the help of the Fix Team if an issue is critical enough to require early disclosure to distributors. +Generally this Private Distributor Disclosure process should be reserved for remotely exploitable or privilege escalation issues. +Otherwise, this process can be skipped. +- The Fix Lead will email the patches to coredns-distributors-announce@lists.cncf.io so distributors can prepare their own release to be available to users on the day of the issue's announcement. 
+Distributors should read about the [Private Distributor List](#private-distributor-list) to find out the requirements for being added to this list. +- **What if a distributor breaks embargo?** The PST will assess the damage and may make the call to release earlier or continue with the plan. +When in doubt push forward and go public ASAP. + +**Fix Release Day** (Completed within 1-21 days of Disclosure) + +- the Fix Team will selectively choose all needed commits from the Master branch in order to create a new release on top of the current last version released. +- Release process will be as usual. +- The Fix Lead will request a CVE from [DWF](https://github.com/distributedweaknessfiling/DWF-Documentation) + and include the CVSS and release details. +- The Fix Lead will inform all users, devs and integrators, now that everything is public, + announcing the new releases, the CVE number, and the relevant merged PRs to get wide distribution + and user action. As much as possible this email should be actionable and include links on how to apply + the fix to user's environments; this can include links to external distributor documentation. + + +## Private Distributor List + +This list is intended to be used primarily to provide actionable information to +multiple distributor projects at once. This list is not intended for +individuals to find out about security issues. + +### Embargo Policy + +The information members receive on coredns-distributors-announce@lists.cncf.io must not be +made public, shared, nor even hinted at anywhere beyond the need-to-know within +your specific team except with the list's explicit approval. +This holds true until the public disclosure date/time that was agreed upon by the list. +Members of the list and others may not use the information for anything other +than getting the issue fixed for your respective distribution's users. 
+ +Before any information from the list is shared with respective members of your +team required to fix said issue, they must agree to the same terms and only +find out information on a need-to-know basis. + +In the unfortunate event you share the information beyond what is allowed by +this policy, you _must_ urgently inform the security@coredns.io +mailing list of exactly what information leaked and to whom. + +If you continue to leak information and break the policy outlined here, you +will be removed from the list. + +### Contributing Back + +This is a team effort. As a member of the list you must carry some water. This +could be in the form of the following: + +**Technical** + +- Review and/or test the proposed patches and point out potential issues with + them (such as incomplete fixes for the originally reported issues, additional + issues you might notice, and newly introduced bugs), and inform the list of the + work done even if no issues were encountered. + +**Administrative** + +- Help draft emails to the public disclosure mailing list. +- Help with release notes. + +### Membership Criteria + +To be eligible for the coredns-distributors-announce@lists.cncf.io mailing list, your +distribution should: + +1. Be an active distributor of CoreDNS component. +2. Have a user base not limited to your own organization. +3. Have a publicly verifiable track record up to present day of fixing security + issues. +4. Not be a downstream or rebuild of another distributor. +5. Be a participant and active contributor in the community. +6. Accept the [Embargo Policy](#embargo-policy) that is outlined above. +7. Have someone already on the list vouch for the person requesting membership + on behalf of your distribution. + +### Requesting to Join + +New membership requests are sent to security@coredns.io. + +In the body of your request please specify how you qualify and fulfill each +criterion listed in [Membership Criteria](#membership-criteria). 
diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md new file mode 120000 index 00000000000..637237e0964 --- /dev/null +++ b/CODE_OF_CONDUCT.md @@ -0,0 +1 @@ +.github/CODE_OF_CONDUCT.md \ No newline at end of file diff --git a/SECURITY.md b/SECURITY.md deleted file mode 100644 index f9e317ee17f..00000000000 --- a/SECURITY.md +++ /dev/null @@ -1,189 +0,0 @@ -# Security Release Process - -The CoreDNS project has adopted this security disclosures and response policy -to ensure responsible handling of critical issues. - - -## Product Security Team (PST) - -Security vulnerabilities should be handled quickly and sometimes privately. -The primary goal of this process is to reduce the total time users are vulnerable to publicly known exploits. - -The Product Security Team (PST) is responsible for organizing the entire response including internal communication and external disclosure. - -The initial Product Security Team will consist of the set of maintainers that volunteered. - -### mailing lists - -* security@coredns.io : for any security concerns. Received by Product Security Team members, and used by this Team to discuss security issues and fixes. -* coredns-distributors-announce@lists.cncf.io: for early private information on Security patch releases. see below how CoreDNS distributors can apply for this list. - - -## Disclosures - -### Private Disclosure Processes - -If you find a security vulnerability or any security related issues, -please DO NOT file a public issue. Do not create a Github issue. -Instead, send your report privately to security@coredns.io. -Security reports are greatly appreciated and we will publicly thank you for it. - -Please provide as much information as possible, so we can react quickly. 
-For instance, that could include: -- Description of the location and potential impact of the vulnerability; -- A detailed description of the steps required to reproduce the vulnerability (POC scripts, screenshots, and compressed packet captures are all helpful to us) -- Whatever else you think we might need to identify the source of this vulnerability - -### Public Disclosure Processes - -If you know of a publicly disclosed security vulnerability please IMMEDIATELY email security@coredns.io -to inform the Product Security Team (PST) about the vulnerability so we start the patch, release, and communication process. - -If possible the PST will ask the person making the public report if the issue can be handled via a private disclosure process -(for example if the full exploit details have not yet been published). -If the reporter denies the request for private disclosure, the PST will move swiftly with the fix and release process. -In extreme cases you can ask GitHub to delete the issue but this generally isn't necessary and is unlikely to make a public disclosure less damaging. - -## Patch, Release, and Public Communication - -For each vulnerability a member of the PST will volunteer to lead coordination with the "Fix Team" -and is responsible for sending disclosure emails to the rest of the community. -This lead will be referred to as the "Fix Lead." - -The role of Fix Lead should rotate round-robin across the PST. - -Note that given the current size of the CoreDNS community it is likely that the PST is the same as the "Fix team." -The PST may decide to bring in additional contributors for added expertise depending on the area of the code that contains the vulnerability. - -All of the timelines below are suggestions and assume a Private Disclosure. -If the Team is dealing with a Public Disclosure all timelines become ASAP. -If the fix relies on another upstream project's disclosure timeline, that will adjust the process as well. 
-We will work with the upstream project to fit their timeline and best protect our users. - -### Fix Team Organization - -These steps should be completed within the first 24 hours of disclosure. - -- The Fix Lead will work quickly to identify relevant engineers from the affected projects and - packages and CC those engineers into the disclosure thread. These selected developers are the Fix - Team. -- The Fix Lead will get the Fix Team access to private security repos to develop the fix. - - -### Fix Development Process - -These steps should be completed within the 1-7 days of Disclosure. - -- The Fix Lead and the Fix Team will create a - [CVSS](https://www.first.org/cvss/specification-document) using the [CVSS - Calculator](https://www.first.org/cvss/calculator/3.0). The Fix Lead makes the final call on the - calculated CVSS; it is better to move quickly than making the CVSS perfect. -- The Fix Team will notify the Fix Lead that work on the fix branch is complete once there are LGTMs - on all commits in the private repo from one or more maintainers. - -If the CVSS score is under 4.0 ([a low severity -score](https://www.first.org/cvss/specification-document#i5)) the Fix Team can decide to slow the -release process down in the face of holidays, developer bandwidth, etc. These decisions must be -discussed on the security@coredns.io mailing list. - -### Fix Disclosure Process - -With the Fix Development underway the CoreDNS Security Team needs to come up with an overall communication plan for the wider community. -This Disclosure process should begin after the Team has developed a fix or mitigation -so that a realistic timeline can be communicated to users. - -**Disclosure of Forthcoming Fix to Users** (Completed within 1-7 days of Disclosure) - -- The Fix Lead will create a github issue in CoreDNS project to inform users that a security vulnerability -has been disclosed and that a fix will be made available, with an estimation of the Release Date. 
-It will include any mitigating steps users can take until a fix is available. - -The communication to users should be actionable. -They should know when to block time to apply patches, understand exact mitigation steps, etc. - -**Optional Fix Disclosure to Private Distributors List** (Completed within 1-14 days of Disclosure): - -- The Fix Lead will make a determination with the help of the Fix Team if an issue is critical enough to require early disclosure to distributors. -Generally this Private Distributor Disclosure process should be reserved for remotely exploitable or privilege escalation issues. -Otherwise, this process can be skipped. -- The Fix Lead will email the patches to coredns-distributors-announce@lists.cncf.io so distributors can prepare their own release to be available to users on the day of the issue's announcement. -Distributors should read about the [Private Distributor List](#private-distributor-list) to find out the requirements for being added to this list. -- **What if a distributor breaks embargo?** The PST will assess the damage and may make the call to release earlier or continue with the plan. -When in doubt push forward and go public ASAP. - -**Fix Release Day** (Completed within 1-21 days of Disclosure) - -- the Fix Team will selectively choose all needed commits from the Master branch in order to create a new release on top of the current last version released. -- Release process will be as usual. -- The Fix Lead will request a CVE from [DWF](https://github.com/distributedweaknessfiling/DWF-Documentation) - and include the CVSS and release details. -- The Fix Lead will inform all users, devs and integrators, now that everything is public, - announcing the new releases, the CVE number, and the relevant merged PRs to get wide distribution - and user action. 
As much as possible this email should be actionable and include links on how to apply - the fix to user's environments; this can include links to external distributor documentation. - - -## Private Distributor List - -This list is intended to be used primarily to provide actionable information to -multiple distributor projects at once. This list is not intended for -individuals to find out about security issues. - -### Embargo Policy - -The information members receive on coredns-distributors-announce@lists.cncf.io must not be -made public, shared, nor even hinted at anywhere beyond the need-to-know within -your specific team except with the list's explicit approval. -This holds true until the public disclosure date/time that was agreed upon by the list. -Members of the list and others may not use the information for anything other -than getting the issue fixed for your respective distribution's users. - -Before any information from the list is shared with respective members of your -team required to fix said issue, they must agree to the same terms and only -find out information on a need-to-know basis. - -In the unfortunate event you share the information beyond what is allowed by -this policy, you _must_ urgently inform the security@coredns.io -mailing list of exactly what information leaked and to whom. - -If you continue to leak information and break the policy outlined here, you -will be removed from the list. - -### Contributing Back - -This is a team effort. As a member of the list you must carry some water. This -could be in the form of the following: - -**Technical** - -- Review and/or test the proposed patches and point out potential issues with - them (such as incomplete fixes for the originally reported issues, additional - issues you might notice, and newly introduced bugs), and inform the list of the - work done even if no issues were encountered. - -**Administrative** - -- Help draft emails to the public disclosure mailing list. 
-- Help with release notes. - -### Membership Criteria - -To be eligible for the coredns-distributors-announce@lists.cncf.io mailing list, your -distribution should: - -1. Be an active distributor of CoreDNS component. -2. Have a user base not limited to your own organization. -3. Have a publicly verifiable track record up to present day of fixing security - issues. -4. Not be a downstream or rebuild of another distributor. -5. Be a participant and active contributor in the community. -6. Accept the [Embargo Policy](#embargo-policy) that is outlined above. -7. Have someone already on the list vouch for the person requesting membership - on behalf of your distribution. - -### Requesting to Join - -New membership requests are sent to security@coredns.io. - -In the body of your request please specify how you qualify and fulfill each -criterion listed in [Membership Criteria](#membership-criteria). diff --git a/SECURITY.md b/SECURITY.md new file mode 120000 index 00000000000..22125d94b98 --- /dev/null +++ b/SECURITY.md @@ -0,0 +1 @@ +.github/SECURITY.md \ No newline at end of file From a34d564c0727957b293132b2824c288be054378a Mon Sep 17 00:00:00 2001 From: Yong Tang Date: Sat, 7 Dec 2019 07:47:18 -0800 Subject: [PATCH 296/324] Remove trailing whitespace as otherwise `make check` fails (#3515) This PR removes trailing whitespace as otherwise `make check` fails This PR fixes 3513 Signed-off-by: Yong Tang --- plugin/dnstap/dnstapio/io_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugin/dnstap/dnstapio/io_test.go b/plugin/dnstap/dnstapio/io_test.go index 93bfae88af9..4716b4fd473 100644 --- a/plugin/dnstap/dnstapio/io_test.go +++ b/plugin/dnstap/dnstapio/io_test.go @@ -7,7 +7,7 @@ import ( "time" "github.com/coredns/coredns/plugin/pkg/reuseport" - + tap "github.com/dnstap/golang-dnstap" fs "github.com/farsightsec/golang-framestream" ) From 0a6100048d3b8abaac1e5a3b0b9d08d4576920ce Mon Sep 17 00:00:00 2001 From: Yong Tang 
Date: Sat, 7 Dec 2019 07:49:38 -0800 Subject: [PATCH 297/324] Setup GitHub Workflow for auto `go tidy` (#3512) Setup GitHub Workflow for auto `go tidy`, when - 'go.mod' - 'go.sum' - '.github/workflows/go.tidy.yml' has been touched. Signed-off-by: Yong Tang --- .github/workflows/go.tidy.yml | 43 +++++++++++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) create mode 100644 .github/workflows/go.tidy.yml diff --git a/.github/workflows/go.tidy.yml b/.github/workflows/go.tidy.yml new file mode 100644 index 00000000000..e64123b36a8 --- /dev/null +++ b/.github/workflows/go.tidy.yml @@ -0,0 +1,43 @@ +name: go tidy + +on: + push: + branches: + - 'master' + paths: + - 'go.mod' + - 'go.sum' + - '.github/workflows/go.tidy.yml' + +jobs: + fix: + runs-on: ubuntu-latest + steps: + - + name: Checkout + uses: actions/checkout@v1 + - + # https://github.com/actions/checkout/issues/6 + name: Fix detached HEAD + run: git checkout ${GITHUB_REF#refs/heads/} + - + name: Tidy + run: | + rm -f go.sum + go mod tidy + - + name: Set up Git + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + git config user.name "${GITHUB_ACTOR}" + git config user.email "${GITHUB_ACTOR}@users.noreply.github.com" + git remote set-url origin https://x-access-token:${GITHUB_TOKEN}@github.com/${GITHUB_REPOSITORY}.git + - + name: Commit and push changes + run: | + git add . + if output=$(git status --porcelain) && [ ! 
-z "$output" ]; then + git commit -m 'auto go tidy' + git push + fi From 35953235cf6138afd95bff3bf6b54465c66c5372 Mon Sep 17 00:00:00 2001 From: miekg Date: Sat, 7 Dec 2019 15:51:27 +0000 Subject: [PATCH 298/324] auto go tidy --- go.mod | 1 + go.sum | 31 ++----------------------------- 2 files changed, 3 insertions(+), 29 deletions(-) diff --git a/go.mod b/go.mod index c520956eac8..e96f0189192 100644 --- a/go.mod +++ b/go.mod @@ -9,6 +9,7 @@ require ( github.com/Azure/go-autorest/autorest/azure/auth v0.4.0 github.com/DataDog/datadog-go v2.2.0+incompatible // indirect github.com/Shopify/sarama v1.21.0 // indirect + github.com/apache/thrift v0.13.0 // indirect github.com/aws/aws-sdk-go v1.25.43 github.com/caddyserver/caddy v1.0.4 github.com/coredns/federation v0.0.0-20190818181423-e032b096babe diff --git a/go.sum b/go.sum index 4f07a2a8b40..e7e4e11bc8d 100644 --- a/go.sum +++ b/go.sum @@ -4,7 +4,6 @@ cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSR cloud.google.com/go v0.41.0 h1:NFvqUTDnSNYPX5oReekmB+D+90jrJIcVImxQ3qrBVgM= cloud.google.com/go v0.41.0/go.mod h1:OauMR7DV8fzvZIl2qg6rkaIhD/vmgk4iwEw/h6ercmg= contrib.go.opencensus.io/exporter/ocagent v0.4.12/go.mod h1:450APlNTSR6FrvC3CTRqYosuDstRB9un7SOx2k/9ckA= -github.com/Azure/azure-sdk-for-go v32.4.0+incompatible h1:1JP8SKfroEakYiQU2ZyPDosh8w2Tg9UopKt88VyQPt4= github.com/Azure/azure-sdk-for-go v32.4.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go v32.6.0+incompatible h1:PgaVceWF5idtJajyt1rzq1cep6eRPJ8+8hs4GnNzTo0= github.com/Azure/azure-sdk-for-go v32.6.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= @@ -12,14 +11,12 @@ github.com/Azure/go-autorest v13.0.0+incompatible h1:56c11ykhsFSPNNQuS73Ri8h/ezq github.com/Azure/go-autorest v13.0.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= 
github.com/Azure/go-autorest/autorest v0.1.0/go.mod h1:AKyIcETwSUFxIcs/Wnq/C+kwCtlEYGUVd7FPNb2slmg= github.com/Azure/go-autorest/autorest v0.5.0/go.mod h1:9HLKlQjVBH6U3oDfsXOeVc56THsLPw1L03yban4xThw= -github.com/Azure/go-autorest/autorest v0.9.0 h1:MRvx8gncNaXJqOoLmhNjUAKh33JJF8LyxPhomEtOsjs= github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= github.com/Azure/go-autorest/autorest v0.9.2 h1:6AWuh3uWrsZJcNoCHrCF/+g4aKPCU39kaMO6/qrnK/4= github.com/Azure/go-autorest/autorest v0.9.2/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= github.com/Azure/go-autorest/autorest/adal v0.1.0/go.mod h1:MeS4XhScH55IST095THyTxElntu7WqB7pNbZo8Q5G3E= github.com/Azure/go-autorest/autorest/adal v0.2.0/go.mod h1:MeS4XhScH55IST095THyTxElntu7WqB7pNbZo8Q5G3E= github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= -github.com/Azure/go-autorest/autorest/adal v0.6.0 h1:UCTq22yE3RPgbU/8u4scfnnzuCW6pwQ9n+uBtV78ouo= github.com/Azure/go-autorest/autorest/adal v0.6.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= github.com/Azure/go-autorest/autorest/adal v0.7.0 h1:PUMxSVw3tEImG0JTRqbxjXLKCSoPk7DartDELqlOuiI= github.com/Azure/go-autorest/autorest/adal v0.7.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= @@ -44,7 +41,6 @@ github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6L github.com/Azure/go-autorest/tracing v0.1.0/go.mod h1:ROEEAFwXycQw7Sn3DXNtEedEvdeRAgDr0izn4z5Ij88= github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k= github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod 
h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DataDog/datadog-go v2.2.0+incompatible h1:V5BKkxACZLjzHjSgBbr2gvLA2Ae49yhc6CSY7MLy5k4= @@ -65,13 +61,10 @@ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRF github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190808125512-07798873deee/go.mod h1:myCDvQSzCW+wB1WAlocEru4wMGJxy+vlxHdhegi1CDQ= github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20190307165228-86c17b95fcd5/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8= -github.com/apache/thrift v0.12.0 h1:pODnxUFNcjP9UTLZGTdeh+j16A8lJbRvD3rOtrk/7bs= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/apache/thrift v0.13.0 h1:5hryIiq9gtn+MiLVn0wP37kb/uTeRZgN08WoCsAhIhI= +github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/aws/aws-sdk-go v1.23.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.25.36 h1:4+TL/Y2G5hsR1zdfHmjNG1ou1WEqsSWk8v7m1GaDKyo= -github.com/aws/aws-sdk-go v1.25.36/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.25.41 h1:/hj7nZ0586wFqpwjNpzWiUTwtaMgxAZNZKHay80MdXw= -github.com/aws/aws-sdk-go v1.25.41/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.25.43 h1:R5YqHQFIulYVfgRySz9hvBRTWBjudISa+r0C8XQ1ufg= github.com/aws/aws-sdk-go v1.25.43/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc= @@ -224,7 +217,6 @@ 
github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542/go.mod h1:Ow0tF8D4Kplbc8s8sSb3V2oUCygFHVp8gC3Dn6U4MNI= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.3 h1:YPkqC67at8FYaadspW/6uE0COsBxS2656RLEr8Bppgk= github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= @@ -234,7 +226,6 @@ github.com/iij/doapi v0.0.0-20190504054126-0bbf12d6d7df/go.mod h1:QMZY7/J/KSQEhK github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.7 h1:Y+UAYTZ7gDEuOfhxKWy+dvb5dRQ6rJjFSdX2HZY1/gI= github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/infobloxopen/go-trees v0.0.0-20190313150506-2af4e13f9062 h1:d3VSuNcgTCn21dNMm8g412Fck/XWFmMj4nJhhHT7ZZ0= github.com/infobloxopen/go-trees v0.0.0-20190313150506-2af4e13f9062/go.mod h1:PcNJqIlcX/dj3DTG/+QQnRvSgTMG6CLpRMjWcv4+J6w= @@ -245,7 +236,6 @@ github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0 github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/json-iterator/go v0.0.0-20180701071628-ab8a2e0c74be/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= 
github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7 h1:KfgG9LzI+pYjr4xvmz/5H4FXjokeP+rlHLhv3iH62Fo= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= @@ -254,7 +244,6 @@ github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfV github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/cpuid v1.2.0 h1:NMpwD2G9JSFOE1/TJjGSo5zG7Yb2bTe7eq1jH+irmeE= github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/kolo/xmlrpc v0.0.0-20190717152603-07c4ee3fd181/go.mod h1:o03bZfuBwAXHetKXuInt4S7omeXUu62/A845kiycsSQ= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -336,26 +325,22 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= -github.com/prometheus/client_golang v1.0.0 h1:vrDKnkGzuGvhNAL56c7DBz29ZL+KxnoR0x7enabFceM= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= 
github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= github.com/prometheus/client_golang v1.2.1 h1:JnMpQc6ppsNgw9QPAGF6Dod479itz7lvlsMzzNayLOI= github.com/prometheus/client_golang v1.2.1/go.mod h1:XMU6Z2MjaRKVu/dC1qupJI9SiNkDYzz3xecMgSW/F+U= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.4.1 h1:K0MGApIoQvMw27RTdJkPbr3JZ7DNbtxQNyi5STVM6Kw= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= github.com/prometheus/common v0.7.0 h1:L+1lyG48J1zAQXA3RBX/nG/B3gjlHq0zTt2tlbJLyCY= github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.2 
h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= github.com/prometheus/procfs v0.0.5 h1:3+auTFlqw+ZaQYJARz6ArODtkaIwtvBTx3N2NehQlL8= @@ -378,7 +363,6 @@ github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:s github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/cobra v0.0.3 h1:ZlrZ4XsMRm04Fr5pSFxBgfND2EBVa1nLpiy1stUsX/8= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= @@ -386,7 +370,6 @@ github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnIn github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -432,7 +415,6 @@ golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto 
v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190829043050-9756ffdc2472/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392 h1:ACG4HJsFiNMf47Y4PeRoebLNy/2lXT9EtprMuTFWt1M= golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191002192127-34f69633bfdc h1:c0o/qxkaO2LF5t6fQrT4b5hzyggAkLLlCUjqfRxd8Q4= golang.org/x/crypto v0.0.0-20191002192127-34f69633bfdc/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -443,7 +425,6 @@ golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTk golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422 h1:QzoH/1pFpZguR8NrRHLcO6jKqfv2zpuSqZLgdm7ZmjI= golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/net v0.0.0-20180611182652-db08ff08e862/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -463,10 +444,8 @@ golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190923162816-aa69164e4478 h1:l5EDrHhldLYb3ZRHDUhXF7Om7MvYXnkV9/iQNo1lX6g= golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190930134127-c5a3c61f89f3/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191003171128-d98b1b443823 h1:Ypyv6BNJh07T1pUSrehkLemqPKXhus2MkfktJ91kRh4= @@ -498,13 +477,11 @@ golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0 h1:HyfiK1WMnHj5FXFXatD+Qs1A/xC2Run6RzeW1SyHxpc= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe h1:6fAMxZRR6sl1Uq8U61gxU+kPTs2tR8uOySCbBP7BN/M= golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191010194322-b09406accb47 h1:/XfQ9z7ib8eEJX2hdgFTZJ/ntt0swNk5oYBziWeTCvY= golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -516,7 +493,6 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/time v0.0.0-20161028155119-f51c12702a4d/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190921001708-c4c64cad1fd0 h1:xQwXv67TxFo9nC1GJFyab5eq/5B590r6RlnL/G8Sz7w= golang.org/x/time v0.0.0-20190921001708-c4c64cad1fd0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -534,7 +510,6 @@ golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190624190245-7f2218787638/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190907020128-2ca718005c18 h1:xFbv3LvlvQAmbNJFCBKRv1Ccvnh9FVsW0FX2kTWWowE= golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -555,7 +530,6 @@ google.golang.org/genproto 
v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRn google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190626174449-989357319d63 h1:UsSJe9fhWNSz6emfIGPpH5DF23t7ALo2Pf3sC+/hsdg= google.golang.org/genproto v0.0.0-20190626174449-989357319d63/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= @@ -599,7 +573,6 @@ honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc h1:/hemPrYIhOhy8zYrNj+069zDB68us2sMGsfkFJO0iZs= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= k8s.io/api v0.0.0-20190620084959-7cf5895f2711 h1:BblVYz/wE5WtBsD/Gvu54KyBUTJMflolzc5I2DTvh50= k8s.io/api v0.0.0-20190620084959-7cf5895f2711/go.mod h1:TBhBqb1AWbBQbW3XRusr7n7E4v2+5ZY8r8sAMnyFC5A= From 5cc30319d2e5b31c8242e96fcc738255e8659e8b Mon Sep 17 00:00:00 2001 From: Yong Tang Date: Sat, 7 Dec 2019 20:02:51 +0000 Subject: [PATCH 299/324] Upate Signed-off-by: Yong Tang --- go.mod | 8 ++++---- go.sum | 8 ++++++++ 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod 
index e96f0189192..93356d71763 100644 --- a/go.mod +++ b/go.mod @@ -25,7 +25,7 @@ require ( github.com/imdario/mergo v0.3.7 // indirect github.com/infobloxopen/go-trees v0.0.0-20190313150506-2af4e13f9062 github.com/matttproud/golang_protobuf_extensions v1.0.1 - github.com/miekg/dns v1.1.22 + github.com/miekg/dns v1.1.24 github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492 // indirect github.com/opentracing/opentracing-go v1.1.0 github.com/openzipkin-contrib/zipkin-go-opentracing v0.3.5 @@ -37,9 +37,9 @@ require ( github.com/tinylib/msgp v1.1.0 // indirect github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 // indirect go.etcd.io/etcd v0.5.0-alpha.5.0.20190917205325-a14579fbfb1a - golang.org/x/crypto v0.0.0-20191002192127-34f69633bfdc - golang.org/x/net v0.0.0-20191003171128-d98b1b443823 // indirect - golang.org/x/sys v0.0.0-20191010194322-b09406accb47 + golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 + golang.org/x/net v0.0.0-20191207000613-e7e4b65ae663 // indirect + golang.org/x/sys v0.0.0-20191206220618-eeba5f6aabab google.golang.org/api v0.14.0 google.golang.org/grpc v1.25.1 gopkg.in/DataDog/dd-trace-go.v1 v1.19.0 diff --git a/go.sum b/go.sum index e7e4e11bc8d..f6a8c8ff0e4 100644 --- a/go.sum +++ b/go.sum @@ -277,6 +277,8 @@ github.com/mholt/certmagic v0.8.3/go.mod h1:91uJzK5K8IWtYQqTi5R2tsxV1pCde+wdGfaR github.com/miekg/dns v1.1.15/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.22 h1:Jm64b3bO9kP43ddLjL2EY3Io6bmy1qGb9Xxz6TqS6rc= github.com/miekg/dns v1.1.22/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= +github.com/miekg/dns v1.1.24 h1:6G8Eop/HM8hpagajbn0rFQvAKZWiiCa8P6N2I07+wwI= +github.com/miekg/dns v1.1.24/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/mitchellh/go-homedir v1.1.0 
h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-vnc v0.0.0-20150629162542-723ed9867aed/go.mod h1:3rdaFaCv4AyBgu5ALFM0+tSuHrBh6v692nyQe3ikrq0= @@ -418,6 +420,8 @@ golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191002192127-34f69633bfdc h1:c0o/qxkaO2LF5t6fQrT4b5hzyggAkLLlCUjqfRxd8Q4= golang.org/x/crypto v0.0.0-20191002192127-34f69633bfdc/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 h1:ULYEB3JvPRE/IfO+9uO7vKV/xzVTO7XPAwm8xbf4w2g= +golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= @@ -450,6 +454,8 @@ golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190930134127-c5a3c61f89f3/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191003171128-d98b1b443823 h1:Ypyv6BNJh07T1pUSrehkLemqPKXhus2MkfktJ91kRh4= golang.org/x/net v0.0.0-20191003171128-d98b1b443823/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191207000613-e7e4b65ae663 h1:Dd5RoEW+yQi+9DMybroBctIdyiwuNT7sJFMC27/6KxI= +golang.org/x/net v0.0.0-20191207000613-e7e4b65ae663/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 
v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -485,6 +491,8 @@ golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191010194322-b09406accb47 h1:/XfQ9z7ib8eEJX2hdgFTZJ/ntt0swNk5oYBziWeTCvY= golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191206220618-eeba5f6aabab h1:FvshnhkKW+LO3HWHodML8kuVX8rnJTxKm9dFPuI68UM= +golang.org/x/sys v0.0.0-20191206220618-eeba5f6aabab/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= From a2481250e0bfeaa48159b744db62c8e0128415d4 Mon Sep 17 00:00:00 2001 From: yongtang Date: Sat, 7 Dec 2019 20:05:52 +0000 Subject: [PATCH 300/324] auto go tidy --- go.sum | 7 ------- 1 file changed, 7 deletions(-) diff --git a/go.sum b/go.sum index f6a8c8ff0e4..f2d42ef5da2 100644 --- a/go.sum +++ b/go.sum @@ -275,8 +275,6 @@ github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0j github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mholt/certmagic v0.8.3/go.mod h1:91uJzK5K8IWtYQqTi5R2tsxV1pCde+wdGfaRaOZi6aQ= github.com/miekg/dns v1.1.15/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/miekg/dns v1.1.22 h1:Jm64b3bO9kP43ddLjL2EY3Io6bmy1qGb9Xxz6TqS6rc= -github.com/miekg/dns v1.1.22/go.mod 
h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.24 h1:6G8Eop/HM8hpagajbn0rFQvAKZWiiCa8P6N2I07+wwI= github.com/miekg/dns v1.1.24/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= @@ -418,8 +416,6 @@ golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190829043050-9756ffdc2472/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= -golang.org/x/crypto v0.0.0-20191002192127-34f69633bfdc h1:c0o/qxkaO2LF5t6fQrT4b5hzyggAkLLlCUjqfRxd8Q4= -golang.org/x/crypto v0.0.0-20191002192127-34f69633bfdc/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 h1:ULYEB3JvPRE/IfO+9uO7vKV/xzVTO7XPAwm8xbf4w2g= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -452,8 +448,6 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190930134127-c5a3c61f89f3/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191003171128-d98b1b443823 h1:Ypyv6BNJh07T1pUSrehkLemqPKXhus2MkfktJ91kRh4= -golang.org/x/net v0.0.0-20191003171128-d98b1b443823/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191207000613-e7e4b65ae663 
h1:Dd5RoEW+yQi+9DMybroBctIdyiwuNT7sJFMC27/6KxI= golang.org/x/net v0.0.0-20191207000613-e7e4b65ae663/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -489,7 +483,6 @@ golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191010194322-b09406accb47 h1:/XfQ9z7ib8eEJX2hdgFTZJ/ntt0swNk5oYBziWeTCvY= golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191206220618-eeba5f6aabab h1:FvshnhkKW+LO3HWHodML8kuVX8rnJTxKm9dFPuI68UM= golang.org/x/sys v0.0.0-20191206220618-eeba5f6aabab/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= From 6cf1c086aca54d44a63cd24803ec429dd08ef9a2 Mon Sep 17 00:00:00 2001 From: Yong Tang Date: Sat, 7 Dec 2019 15:45:27 -0800 Subject: [PATCH 301/324] Update auto go mod tidy workflow to remove user association (#3517) This PR updates go.tidy.yml, and use pseudo name `coredns-auto-go-mod-tidy[bot]`, so that the commit is not associated with real user. Otherwise the commit history could be confusing. This is similar to what `dependabot[bot]` is doing for commits generated by bots. 
Signed-off-by: Yong Tang --- .github/workflows/go.tidy.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/go.tidy.yml b/.github/workflows/go.tidy.yml index e64123b36a8..3c6aa2be23d 100644 --- a/.github/workflows/go.tidy.yml +++ b/.github/workflows/go.tidy.yml @@ -5,9 +5,9 @@ on: branches: - 'master' paths: + - '.github/workflows/go.tidy.yml' - 'go.mod' - 'go.sum' - - '.github/workflows/go.tidy.yml' jobs: fix: @@ -30,14 +30,14 @@ jobs: env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} run: | - git config user.name "${GITHUB_ACTOR}" - git config user.email "${GITHUB_ACTOR}@users.noreply.github.com" + git config user.name "coredns-auto-go-mod-tidy[bot]" + git config user.email "coredns-auto-go-mod-tidy[bot]@users.noreply.github.com" git remote set-url origin https://x-access-token:${GITHUB_TOKEN}@github.com/${GITHUB_REPOSITORY}.git - name: Commit and push changes run: | git add . if output=$(git status --porcelain) && [ ! -z "$output" ]; then - git commit -m 'auto go tidy' + git commit -m 'auto go mod tidy' git push fi From 6a0403d88ba9b2c82eb34923232ea49405b7a61f Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 9 Dec 2019 06:04:15 -0800 Subject: [PATCH 302/324] build(deps): bump github.com/aws/aws-sdk-go from 1.25.43 to 1.25.48 (#3526) Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.25.43 to 1.25.48. 
- [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Changelog](https://github.com/aws/aws-sdk-go/blob/master/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.25.43...v1.25.48) Signed-off-by: dependabot-preview[bot] --- go.mod | 2 +- go.sum | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index 93356d71763..22eb5236df8 100644 --- a/go.mod +++ b/go.mod @@ -10,7 +10,7 @@ require ( github.com/DataDog/datadog-go v2.2.0+incompatible // indirect github.com/Shopify/sarama v1.21.0 // indirect github.com/apache/thrift v0.13.0 // indirect - github.com/aws/aws-sdk-go v1.25.43 + github.com/aws/aws-sdk-go v1.25.48 github.com/caddyserver/caddy v1.0.4 github.com/coredns/federation v0.0.0-20190818181423-e032b096babe github.com/coreos/go-systemd v0.0.0-20190212144455-93d5ec2c7f76 // indirect diff --git a/go.sum b/go.sum index f2d42ef5da2..83c223744a7 100644 --- a/go.sum +++ b/go.sum @@ -67,6 +67,8 @@ github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb github.com/aws/aws-sdk-go v1.23.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.25.43 h1:R5YqHQFIulYVfgRySz9hvBRTWBjudISa+r0C8XQ1ufg= github.com/aws/aws-sdk-go v1.25.43/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.25.48 h1:J82DYDGZHOKHdhx6hD24Tm30c2C3GchYGfN0mf9iKUk= +github.com/aws/aws-sdk-go v1.25.48/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= From 
9cd287c191ce076911e410e850ef10545b95c5bb Mon Sep 17 00:00:00 2001 From: "coredns-auto-go-mod-tidy[bot]" Date: Mon, 9 Dec 2019 14:06:00 +0000 Subject: [PATCH 303/324] auto go mod tidy --- go.sum | 2 -- 1 file changed, 2 deletions(-) diff --git a/go.sum b/go.sum index 83c223744a7..0d45958b9ae 100644 --- a/go.sum +++ b/go.sum @@ -65,8 +65,6 @@ github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb github.com/apache/thrift v0.13.0 h1:5hryIiq9gtn+MiLVn0wP37kb/uTeRZgN08WoCsAhIhI= github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/aws/aws-sdk-go v1.23.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.25.43 h1:R5YqHQFIulYVfgRySz9hvBRTWBjudISa+r0C8XQ1ufg= -github.com/aws/aws-sdk-go v1.25.43/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.25.48 h1:J82DYDGZHOKHdhx6hD24Tm30c2C3GchYGfN0mf9iKUk= github.com/aws/aws-sdk-go v1.25.48/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc= From ea97a3b22a87f3941d359fd29d1e40b17f238be3 Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 9 Dec 2019 06:07:01 -0800 Subject: [PATCH 304/324] build(deps): bump github.com/Azure/go-autorest/autorest/azure/auth (#3523) Bumps [github.com/Azure/go-autorest/autorest/azure/auth](https://github.com/Azure/go-autorest) from 0.4.0 to 0.4.1. 
- [Release notes](https://github.com/Azure/go-autorest/releases) - [Changelog](https://github.com/Azure/go-autorest/blob/master/CHANGELOG.md) - [Commits](https://github.com/Azure/go-autorest/compare/tracing/v0.4.0...autorest/azure/auth/v0.4.1) Signed-off-by: dependabot-preview[bot] --- go.mod | 2 +- go.sum | 6 ++++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index 22eb5236df8..bac6164bee8 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,7 @@ require ( cloud.google.com/go v0.41.0 // indirect github.com/Azure/azure-sdk-for-go v32.6.0+incompatible github.com/Azure/go-autorest/autorest v0.9.2 - github.com/Azure/go-autorest/autorest/azure/auth v0.4.0 + github.com/Azure/go-autorest/autorest/azure/auth v0.4.1 github.com/DataDog/datadog-go v2.2.0+incompatible // indirect github.com/Shopify/sarama v1.21.0 // indirect github.com/apache/thrift v0.13.0 // indirect diff --git a/go.sum b/go.sum index 0d45958b9ae..f70e0ef5495 100644 --- a/go.sum +++ b/go.sum @@ -20,12 +20,18 @@ github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEg github.com/Azure/go-autorest/autorest/adal v0.6.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= github.com/Azure/go-autorest/autorest/adal v0.7.0 h1:PUMxSVw3tEImG0JTRqbxjXLKCSoPk7DartDELqlOuiI= github.com/Azure/go-autorest/autorest/adal v0.7.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= +github.com/Azure/go-autorest/autorest/adal v0.8.0 h1:CxTzQrySOxDnKpLjFJeZAS5Qrv/qFPkgLjx5bOAi//I= +github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= github.com/Azure/go-autorest/autorest/azure/auth v0.1.0/go.mod h1:Gf7/i2FUpyb/sGBLIFxTBzrNzBo7aPXXE3ZVeDRwdpM= github.com/Azure/go-autorest/autorest/azure/auth v0.4.0 h1:18ld/uw9Rr7VkNie7a7RMAcFIWrJdlUL59TWGfcu530= 
github.com/Azure/go-autorest/autorest/azure/auth v0.4.0/go.mod h1:Oo5cRhLvZteXzI2itUm5ziqsoIxRkzrt3t61FeZaS18= +github.com/Azure/go-autorest/autorest/azure/auth v0.4.1 h1:VDSqmaEc8ECZdfavoa1KmVpIVTGTc+v/2jvHGmCYvSE= +github.com/Azure/go-autorest/autorest/azure/auth v0.4.1/go.mod h1:5TgH20II424SXIV9YDBsO4rBCKsh39Vbx9DvhJZZ8rU= github.com/Azure/go-autorest/autorest/azure/cli v0.1.0/go.mod h1:Dk8CUAt/b/PzkfeRsWzVG9Yj3ps8mS8ECztu43rdU8U= github.com/Azure/go-autorest/autorest/azure/cli v0.3.0 h1:5PAqnv+CSTwW9mlZWZAizmzrazFWEgZykEZXpr2hDtY= github.com/Azure/go-autorest/autorest/azure/cli v0.3.0/go.mod h1:rNYMNAefZMRowqCV0cVhr/YDW5dD7afFq9nXAXL4ykE= +github.com/Azure/go-autorest/autorest/azure/cli v0.3.1 h1:LXl088ZQlP0SBppGFsRZonW6hSvwgL5gRByMbvUbx8U= +github.com/Azure/go-autorest/autorest/azure/cli v0.3.1/go.mod h1:ZG5p860J94/0kI9mNJVoIoLgXcirM2gF5i2kWloofxw= github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= github.com/Azure/go-autorest/autorest/date v0.2.0 h1:yW+Zlqf26583pE43KhfnhFcdmSWlm5Ew6bxipnr/tbM= github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= From d708ca0f4d06da76d0acb0b7c2e97ec0aeac4151 Mon Sep 17 00:00:00 2001 From: "coredns-auto-go-mod-tidy[bot]" Date: Mon, 9 Dec 2019 14:08:43 +0000 Subject: [PATCH 305/324] auto go mod tidy --- go.sum | 7 ------- 1 file changed, 7 deletions(-) diff --git a/go.sum b/go.sum index f70e0ef5495..374cbfaa377 100644 --- a/go.sum +++ b/go.sum @@ -17,19 +17,12 @@ github.com/Azure/go-autorest/autorest v0.9.2/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+B github.com/Azure/go-autorest/autorest/adal v0.1.0/go.mod h1:MeS4XhScH55IST095THyTxElntu7WqB7pNbZo8Q5G3E= github.com/Azure/go-autorest/autorest/adal v0.2.0/go.mod h1:MeS4XhScH55IST095THyTxElntu7WqB7pNbZo8Q5G3E= github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod 
h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= -github.com/Azure/go-autorest/autorest/adal v0.6.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= -github.com/Azure/go-autorest/autorest/adal v0.7.0 h1:PUMxSVw3tEImG0JTRqbxjXLKCSoPk7DartDELqlOuiI= -github.com/Azure/go-autorest/autorest/adal v0.7.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= github.com/Azure/go-autorest/autorest/adal v0.8.0 h1:CxTzQrySOxDnKpLjFJeZAS5Qrv/qFPkgLjx5bOAi//I= github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= github.com/Azure/go-autorest/autorest/azure/auth v0.1.0/go.mod h1:Gf7/i2FUpyb/sGBLIFxTBzrNzBo7aPXXE3ZVeDRwdpM= -github.com/Azure/go-autorest/autorest/azure/auth v0.4.0 h1:18ld/uw9Rr7VkNie7a7RMAcFIWrJdlUL59TWGfcu530= -github.com/Azure/go-autorest/autorest/azure/auth v0.4.0/go.mod h1:Oo5cRhLvZteXzI2itUm5ziqsoIxRkzrt3t61FeZaS18= github.com/Azure/go-autorest/autorest/azure/auth v0.4.1 h1:VDSqmaEc8ECZdfavoa1KmVpIVTGTc+v/2jvHGmCYvSE= github.com/Azure/go-autorest/autorest/azure/auth v0.4.1/go.mod h1:5TgH20II424SXIV9YDBsO4rBCKsh39Vbx9DvhJZZ8rU= github.com/Azure/go-autorest/autorest/azure/cli v0.1.0/go.mod h1:Dk8CUAt/b/PzkfeRsWzVG9Yj3ps8mS8ECztu43rdU8U= -github.com/Azure/go-autorest/autorest/azure/cli v0.3.0 h1:5PAqnv+CSTwW9mlZWZAizmzrazFWEgZykEZXpr2hDtY= -github.com/Azure/go-autorest/autorest/azure/cli v0.3.0/go.mod h1:rNYMNAefZMRowqCV0cVhr/YDW5dD7afFq9nXAXL4ykE= github.com/Azure/go-autorest/autorest/azure/cli v0.3.1 h1:LXl088ZQlP0SBppGFsRZonW6hSvwgL5gRByMbvUbx8U= github.com/Azure/go-autorest/autorest/azure/cli v0.3.1/go.mod h1:ZG5p860J94/0kI9mNJVoIoLgXcirM2gF5i2kWloofxw= github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= From 864a108ed0185f100de5956ad0cbccc5abcd35a9 Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" 
<27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 9 Dec 2019 06:36:45 -0800 Subject: [PATCH 306/324] build(deps): bump github.com/Azure/go-autorest/autorest (#3524) Bumps [github.com/Azure/go-autorest/autorest](https://github.com/Azure/go-autorest) from 0.9.2 to 0.9.3. - [Release notes](https://github.com/Azure/go-autorest/releases) - [Changelog](https://github.com/Azure/go-autorest/blob/master/CHANGELOG.md) - [Commits](https://github.com/Azure/go-autorest/compare/autorest/v0.9.2...autorest/v0.9.3) Signed-off-by: dependabot-preview[bot] --- go.mod | 2 +- go.sum | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index bac6164bee8..97089414e4e 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ go 1.12 require ( cloud.google.com/go v0.41.0 // indirect github.com/Azure/azure-sdk-for-go v32.6.0+incompatible - github.com/Azure/go-autorest/autorest v0.9.2 + github.com/Azure/go-autorest/autorest v0.9.3 github.com/Azure/go-autorest/autorest/azure/auth v0.4.1 github.com/DataDog/datadog-go v2.2.0+incompatible // indirect github.com/Shopify/sarama v1.21.0 // indirect diff --git a/go.sum b/go.sum index 374cbfaa377..1b8a3ccf5cf 100644 --- a/go.sum +++ b/go.sum @@ -14,6 +14,8 @@ github.com/Azure/go-autorest/autorest v0.5.0/go.mod h1:9HLKlQjVBH6U3oDfsXOeVc56T github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= github.com/Azure/go-autorest/autorest v0.9.2 h1:6AWuh3uWrsZJcNoCHrCF/+g4aKPCU39kaMO6/qrnK/4= github.com/Azure/go-autorest/autorest v0.9.2/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest v0.9.3 h1:OZEIaBbMdUE/Js+BQKlpO81XlISgipr6yDJ+PSwsgi4= +github.com/Azure/go-autorest/autorest v0.9.3/go.mod h1:GsRuLYvwzLjjjRoWEIyMUaYq8GNUx2nRB378IPt/1p0= github.com/Azure/go-autorest/autorest/adal 
v0.1.0/go.mod h1:MeS4XhScH55IST095THyTxElntu7WqB7pNbZo8Q5G3E= github.com/Azure/go-autorest/autorest/adal v0.2.0/go.mod h1:MeS4XhScH55IST095THyTxElntu7WqB7pNbZo8Q5G3E= github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= From acea8f535f4b73dd0fcde9867af18947586e3c35 Mon Sep 17 00:00:00 2001 From: "coredns-auto-go-mod-tidy[bot]" Date: Mon, 9 Dec 2019 14:38:38 +0000 Subject: [PATCH 307/324] auto go mod tidy --- go.sum | 1 - 1 file changed, 1 deletion(-) diff --git a/go.sum b/go.sum index 1b8a3ccf5cf..71004e5541c 100644 --- a/go.sum +++ b/go.sum @@ -12,7 +12,6 @@ github.com/Azure/go-autorest v13.0.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSW github.com/Azure/go-autorest/autorest v0.1.0/go.mod h1:AKyIcETwSUFxIcs/Wnq/C+kwCtlEYGUVd7FPNb2slmg= github.com/Azure/go-autorest/autorest v0.5.0/go.mod h1:9HLKlQjVBH6U3oDfsXOeVc56THsLPw1L03yban4xThw= github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= -github.com/Azure/go-autorest/autorest v0.9.2 h1:6AWuh3uWrsZJcNoCHrCF/+g4aKPCU39kaMO6/qrnK/4= github.com/Azure/go-autorest/autorest v0.9.2/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= github.com/Azure/go-autorest/autorest v0.9.3 h1:OZEIaBbMdUE/Js+BQKlpO81XlISgipr6yDJ+PSwsgi4= github.com/Azure/go-autorest/autorest v0.9.3/go.mod h1:GsRuLYvwzLjjjRoWEIyMUaYq8GNUx2nRB378IPt/1p0= From 5c5f18c50c9e1b128b074f01b27bfddc3535608b Mon Sep 17 00:00:00 2001 From: Yong Tang Date: Wed, 11 Dec 2019 06:05:44 -0800 Subject: [PATCH 308/324] Bump version to 1.6.6 (#3520) For release tracking 3519 Signed-off-by: Yong Tang --- coremain/version.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/coremain/version.go b/coremain/version.go index 53bc9714b86..a0a323921b8 100644 --- a/coremain/version.go +++ b/coremain/version.go @@ -2,7 +2,7 @@ package coremain // Various CoreDNS constants. 
const ( - CoreVersion = "1.6.5" + CoreVersion = "1.6.6" coreName = "CoreDNS" serverType = "dns" ) From 2e7dbfdcf01645067e653caf83b587b212d8e5b0 Mon Sep 17 00:00:00 2001 From: Yong Tang Date: Wed, 11 Dec 2019 06:29:37 -0800 Subject: [PATCH 309/324] Update github.com/miekg/dns to v1.1.25 (#3529) Signed-off-by: Yong Tang --- go.mod | 6 +++--- go.sum | 12 ++++++------ 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/go.mod b/go.mod index 97089414e4e..92f36b2e6a6 100644 --- a/go.mod +++ b/go.mod @@ -25,7 +25,7 @@ require ( github.com/imdario/mergo v0.3.7 // indirect github.com/infobloxopen/go-trees v0.0.0-20190313150506-2af4e13f9062 github.com/matttproud/golang_protobuf_extensions v1.0.1 - github.com/miekg/dns v1.1.24 + github.com/miekg/dns v1.1.25 github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492 // indirect github.com/opentracing/opentracing-go v1.1.0 github.com/openzipkin-contrib/zipkin-go-opentracing v0.3.5 @@ -38,8 +38,8 @@ require ( github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 // indirect go.etcd.io/etcd v0.5.0-alpha.5.0.20190917205325-a14579fbfb1a golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 - golang.org/x/net v0.0.0-20191207000613-e7e4b65ae663 // indirect - golang.org/x/sys v0.0.0-20191206220618-eeba5f6aabab + golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553 // indirect + golang.org/x/sys v0.0.0-20191210023423-ac6580df4449 google.golang.org/api v0.14.0 google.golang.org/grpc v1.25.1 gopkg.in/DataDog/dd-trace-go.v1 v1.19.0 diff --git a/go.sum b/go.sum index 71004e5541c..5455e1f0660 100644 --- a/go.sum +++ b/go.sum @@ -275,8 +275,8 @@ github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0j github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mholt/certmagic v0.8.3/go.mod 
h1:91uJzK5K8IWtYQqTi5R2tsxV1pCde+wdGfaRaOZi6aQ= github.com/miekg/dns v1.1.15/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/miekg/dns v1.1.24 h1:6G8Eop/HM8hpagajbn0rFQvAKZWiiCa8P6N2I07+wwI= -github.com/miekg/dns v1.1.24/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= +github.com/miekg/dns v1.1.25 h1:dFwPR6SfLtrSwgDcIq2bcU/gVutB4sNApq2HBdqcakg= +github.com/miekg/dns v1.1.25/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-vnc v0.0.0-20150629162542-723ed9867aed/go.mod h1:3rdaFaCv4AyBgu5ALFM0+tSuHrBh6v692nyQe3ikrq0= @@ -448,8 +448,8 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190930134127-c5a3c61f89f3/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191207000613-e7e4b65ae663 h1:Dd5RoEW+yQi+9DMybroBctIdyiwuNT7sJFMC27/6KxI= -golang.org/x/net v0.0.0-20191207000613-e7e4b65ae663/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553 h1:efeOvDhwQ29Dj3SdAV/MJf8oukgn+8D8WgaCaRMchF8= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ 
-484,8 +484,8 @@ golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191206220618-eeba5f6aabab h1:FvshnhkKW+LO3HWHodML8kuVX8rnJTxKm9dFPuI68UM= -golang.org/x/sys v0.0.0-20191206220618-eeba5f6aabab/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191210023423-ac6580df4449 h1:gSbV7h1NRL2G1xTg/owz62CST1oJBmxy4QpMMregXVQ= +golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= From fdb3cff87cd838a10e529c379a3d97205d3f08d1 Mon Sep 17 00:00:00 2001 From: Yong Tang Date: Wed, 11 Dec 2019 06:49:38 -0800 Subject: [PATCH 310/324] Regenerate man-page (#3530) Signed-off-by: Yong Tang --- man/coredns-acl.7 | 2 +- man/coredns-any.7 | 2 +- man/coredns-auto.7 | 2 +- man/coredns-autopath.7 | 2 +- man/coredns-azure.7 | 2 +- man/coredns-bind.7 | 2 +- man/coredns-bufsize.7 | 2 +- man/coredns-cache.7 | 10 +++++++++- man/coredns-cancel.7 | 2 +- man/coredns-chaos.7 | 2 +- man/coredns-clouddns.7 | 2 +- man/coredns-debug.7 | 2 +- man/coredns-dnssec.7 | 2 +- man/coredns-dnstap.7 | 2 +- man/coredns-erratic.7 | 2 +- man/coredns-errors.7 | 2 +- man/coredns-etcd.7 | 2 +- man/coredns-file.7 | 2 +- man/coredns-forward.7 | 2 +- man/coredns-grpc.7 | 2 +- man/coredns-health.7 | 2 +- man/coredns-hosts.7 | 2 +- man/coredns-import.7 | 2 +- 
man/coredns-k8s_external.7 | 2 +- man/coredns-kubernetes.7 | 2 +- man/coredns-loadbalance.7 | 2 +- man/coredns-log.7 | 2 +- man/coredns-loop.7 | 2 +- man/coredns-metadata.7 | 2 +- man/coredns-metrics.7 | 2 +- man/coredns-nsid.7 | 2 +- man/coredns-pprof.7 | 2 +- man/coredns-ready.7 | 2 +- man/coredns-reload.7 | 2 +- man/coredns-rewrite.7 | 2 +- man/coredns-root.7 | 2 +- man/coredns-route53.7 | 2 +- man/coredns-secondary.7 | 2 +- man/coredns-sign.7 | 32 ++++++++++++++++++++------------ man/coredns-template.7 | 2 +- man/coredns-tls.7 | 2 +- man/coredns-trace.7 | 14 +++++++------- man/coredns-transfer.7 | 2 +- man/coredns-whoami.7 | 2 +- 44 files changed, 77 insertions(+), 61 deletions(-) diff --git a/man/coredns-acl.7 b/man/coredns-acl.7 index 15c25af1dc8..d92594171c3 100644 --- a/man/coredns-acl.7 +++ b/man/coredns-acl.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-ACL" 7 "October 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-ACL" 7 "December 2019" "CoreDNS" "CoreDNS Plugins" .PP \fIacl\fP - enforces access control policies on source ip and prevents unauthorized access to DNS servers. 
diff --git a/man/coredns-any.7 b/man/coredns-any.7 index d79953bc25e..a745461bf32 100644 --- a/man/coredns-any.7 +++ b/man/coredns-any.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-ANY" 7 "September 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-ANY" 7 "December 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-auto.7 b/man/coredns-auto.7 index 22d4555bc65..c3b9250366c 100644 --- a/man/coredns-auto.7 +++ b/man/coredns-auto.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-AUTO" 7 "September 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-AUTO" 7 "December 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-autopath.7 b/man/coredns-autopath.7 index e493dc4514f..39792104782 100644 --- a/man/coredns-autopath.7 +++ b/man/coredns-autopath.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-AUTOPATH" 7 "November 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-AUTOPATH" 7 "December 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-azure.7 b/man/coredns-azure.7 index 42527ae20a1..db86a38ce53 100644 --- a/man/coredns-azure.7 +++ b/man/coredns-azure.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-AZURE" 7 "September 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-AZURE" 7 "December 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-bind.7 b/man/coredns-bind.7 index 942536b0f90..2cfc34a8ca4 100644 --- a/man/coredns-bind.7 +++ b/man/coredns-bind.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-BIND" 7 "October 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-BIND" 7 "December 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-bufsize.7 b/man/coredns-bufsize.7 index e337ef61f4f..e3e4e0e97b2 100644 --- a/man/coredns-bufsize.7 +++ b/man/coredns-bufsize.7 @@ -1,5 
+1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-BUFSIZE" 7 "November 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-BUFSIZE" 7 "December 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-cache.7 b/man/coredns-cache.7 index 5481ced31d7..0349af0f6c1 100644 --- a/man/coredns-cache.7 +++ b/man/coredns-cache.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-CACHE" 7 "October 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-CACHE" 7 "December 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP @@ -48,6 +48,7 @@ cache [TTL] [ZONES...] { success CAPACITY [TTL] [MINTTL] denial CAPACITY [TTL] [MINTTL] prefetch AMOUNT [[DURATION] [PERCENTAGE%]] + serve\_stale [DURATION] } .fi @@ -70,6 +71,11 @@ Popular means \fBAMOUNT\fP queries have been seen with no gaps of \fBDURATION\fP \fBDURATION\fP defaults to 1m. Prefetching will happen when the TTL drops below \fBPERCENTAGE\fP, which defaults to \fB\fC10%\fR, or latest 1 second before TTL expiration. Values should be in the range \fB\fC[10%, 90%]\fR. Note the percent sign is mandatory. \fBPERCENTAGE\fP is treated as an \fB\fCint\fR. +.IP \(bu 4 +\fB\fCserve_stale\fR, when serve_stale is set, cache always will serve an expired entry to a client if there is one +available. When this happens, cache will attempt to refresh the cache entry after sending the expired cache +entry to the client. The responses have a TTL of 0. \fBDURATION\fP is how far back to consider +stale responses as fresh. The default duration is 1h. .SH "CAPACITY AND EVICTION" @@ -95,6 +101,8 @@ If monitoring is enabled (via the \fIprometheus\fP plugin) then the following me \fB\fCcoredns_cache_misses_total{server}\fR - Counter of cache misses. .IP \(bu 4 \fB\fCcoredns_cache_drops_total{server}\fR - Counter of dropped messages. +.IP \(bu 4 +\fB\fCcoredns_cache_served_stale_total{server}\fR - Counter of requests served from stale cache entries. 
.PP diff --git a/man/coredns-cancel.7 b/man/coredns-cancel.7 index a628d9ad05d..3cb6f62924d 100644 --- a/man/coredns-cancel.7 +++ b/man/coredns-cancel.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-CANCEL" 7 "September 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-CANCEL" 7 "December 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-chaos.7 b/man/coredns-chaos.7 index 1769f3d2dc2..07d7e5ee606 100644 --- a/man/coredns-chaos.7 +++ b/man/coredns-chaos.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-CHAOS" 7 "September 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-CHAOS" 7 "December 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-clouddns.7 b/man/coredns-clouddns.7 index a0cf3b23ac5..4783e028d15 100644 --- a/man/coredns-clouddns.7 +++ b/man/coredns-clouddns.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-CLOUDDNS" 7 "November 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-CLOUDDNS" 7 "December 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-debug.7 b/man/coredns-debug.7 index a9eb0451121..a00c376ab08 100644 --- a/man/coredns-debug.7 +++ b/man/coredns-debug.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-DEBUG" 7 "September 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-DEBUG" 7 "December 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-dnssec.7 b/man/coredns-dnssec.7 index 07c096110ff..c9d944808cd 100644 --- a/man/coredns-dnssec.7 +++ b/man/coredns-dnssec.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-DNSSEC" 7 "October 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-DNSSEC" 7 "December 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-dnstap.7 b/man/coredns-dnstap.7 index 60fa63a49b9..e891614c5bd 100644 --- a/man/coredns-dnstap.7 
+++ b/man/coredns-dnstap.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-DNSTAP" 7 "September 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-DNSTAP" 7 "December 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-erratic.7 b/man/coredns-erratic.7 index 8c97fc00901..774a478e62c 100644 --- a/man/coredns-erratic.7 +++ b/man/coredns-erratic.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-ERRATIC" 7 "October 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-ERRATIC" 7 "December 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-errors.7 b/man/coredns-errors.7 index 072f1c3ba0c..57adeb019b7 100644 --- a/man/coredns-errors.7 +++ b/man/coredns-errors.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-ERRORS" 7 "September 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-ERRORS" 7 "December 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-etcd.7 b/man/coredns-etcd.7 index 8221ed63e2a..f9784196ff3 100644 --- a/man/coredns-etcd.7 +++ b/man/coredns-etcd.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-ETCD" 7 "September 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-ETCD" 7 "December 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-file.7 b/man/coredns-file.7 index 6bf36036939..bdc4e1b049b 100644 --- a/man/coredns-file.7 +++ b/man/coredns-file.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-FILE" 7 "October 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-FILE" 7 "December 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-forward.7 b/man/coredns-forward.7 index 894fe713873..40cbd9b856f 100644 --- a/man/coredns-forward.7 +++ b/man/coredns-forward.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-FORWARD" 7 "November 2019" 
"CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-FORWARD" 7 "December 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-grpc.7 b/man/coredns-grpc.7 index be33ab4988b..c0b68bba0cc 100644 --- a/man/coredns-grpc.7 +++ b/man/coredns-grpc.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-GRPC" 7 "October 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-GRPC" 7 "December 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-health.7 b/man/coredns-health.7 index adbbf4ea9c1..c8567a2e1bd 100644 --- a/man/coredns-health.7 +++ b/man/coredns-health.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-HEALTH" 7 "October 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-HEALTH" 7 "December 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-hosts.7 b/man/coredns-hosts.7 index f31a08f3d76..2f7b6168fe0 100644 --- a/man/coredns-hosts.7 +++ b/man/coredns-hosts.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-HOSTS" 7 "October 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-HOSTS" 7 "December 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-import.7 b/man/coredns-import.7 index ba3dfb52a1e..215a86b211e 100644 --- a/man/coredns-import.7 +++ b/man/coredns-import.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-IMPORT" 7 "October 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-IMPORT" 7 "December 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-k8s_external.7 b/man/coredns-k8s_external.7 index 1cabc3e7f3d..5b43fe125d9 100644 --- a/man/coredns-k8s_external.7 +++ b/man/coredns-k8s_external.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-K8S_EXTERNAL" 7 "September 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-K8S_EXTERNAL" 7 "December 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP 
diff --git a/man/coredns-kubernetes.7 b/man/coredns-kubernetes.7 index e3b5e317788..21596ed94d7 100644 --- a/man/coredns-kubernetes.7 +++ b/man/coredns-kubernetes.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-KUBERNETES" 7 "October 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-KUBERNETES" 7 "December 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-loadbalance.7 b/man/coredns-loadbalance.7 index 230421f76c6..0b34f522a4d 100644 --- a/man/coredns-loadbalance.7 +++ b/man/coredns-loadbalance.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-LOADBALANCE" 7 "September 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-LOADBALANCE" 7 "December 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-log.7 b/man/coredns-log.7 index d9d7f30fb7c..d1e7e653821 100644 --- a/man/coredns-log.7 +++ b/man/coredns-log.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-LOG" 7 "September 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-LOG" 7 "December 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-loop.7 b/man/coredns-loop.7 index 636d46ed623..b1baa91ea56 100644 --- a/man/coredns-loop.7 +++ b/man/coredns-loop.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-LOOP" 7 "September 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-LOOP" 7 "December 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-metadata.7 b/man/coredns-metadata.7 index 63af5a06609..7c6de649f25 100644 --- a/man/coredns-metadata.7 +++ b/man/coredns-metadata.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-METADATA" 7 "September 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-METADATA" 7 "December 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-metrics.7 b/man/coredns-metrics.7 index 6fd2f62157b..ca895cacbf0 
100644 --- a/man/coredns-metrics.7 +++ b/man/coredns-metrics.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-METRICS" 7 "September 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-METRICS" 7 "December 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-nsid.7 b/man/coredns-nsid.7 index 765c5b7ed76..359b79384cf 100644 --- a/man/coredns-nsid.7 +++ b/man/coredns-nsid.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-NSID" 7 "September 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-NSID" 7 "December 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-pprof.7 b/man/coredns-pprof.7 index 8a974144591..092336943d4 100644 --- a/man/coredns-pprof.7 +++ b/man/coredns-pprof.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-PPROF" 7 "September 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-PPROF" 7 "December 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-ready.7 b/man/coredns-ready.7 index f97a10323bd..fdcaad6c62c 100644 --- a/man/coredns-ready.7 +++ b/man/coredns-ready.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-READY" 7 "September 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-READY" 7 "December 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-reload.7 b/man/coredns-reload.7 index 97c81c9b9da..8265abc6595 100644 --- a/man/coredns-reload.7 +++ b/man/coredns-reload.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-RELOAD" 7 "November 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-RELOAD" 7 "December 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-rewrite.7 b/man/coredns-rewrite.7 index 898f7509da4..b7afff2be94 100644 --- a/man/coredns-rewrite.7 +++ b/man/coredns-rewrite.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH 
"COREDNS-REWRITE" 7 "September 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-REWRITE" 7 "December 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-root.7 b/man/coredns-root.7 index e5c882928d0..aee1865b5a4 100644 --- a/man/coredns-root.7 +++ b/man/coredns-root.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-ROOT" 7 "August 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-ROOT" 7 "December 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-route53.7 b/man/coredns-route53.7 index 8aa1badcda5..276b6a7a00a 100644 --- a/man/coredns-route53.7 +++ b/man/coredns-route53.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-ROUTE53" 7 "September 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-ROUTE53" 7 "December 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-secondary.7 b/man/coredns-secondary.7 index 9ca520eaaf6..b1ce5afbcd7 100644 --- a/man/coredns-secondary.7 +++ b/man/coredns-secondary.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-SECONDARY" 7 "September 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-SECONDARY" 7 "December 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-sign.7 b/man/coredns-sign.7 index bed4ff11d6d..7d780b60ba4 100644 --- a/man/coredns-sign.7 +++ b/man/coredns-sign.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-SIGN" 7 "November 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-SIGN" 7 "December 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP @@ -10,8 +10,7 @@ The \fIsign\fP plugin is used to sign (see RFC 6781) zones. In this process DNSSEC resource records are added. The signatures that sign the resource records sets have an expiration date, this means the signing process must be repeated before this expiration data is reached. 
Otherwise the zone's data -will go BAD (RFC 4035, Section 5.5). The \fIsign\fP plugin takes care of this. \fISign\fP works, but has -a couple of limitations, see the "Bugs" section. +will go BAD (RFC 4035, Section 5.5). The \fIsign\fP plugin takes care of this. .PP Only NSEC is supported, \fIsign\fP does not support NSEC3. @@ -43,9 +42,12 @@ the signature only has 14 days left before expiring. Both these dates are only checked on the SOA's signature(s). .IP \(bu 4 -Create signatures that have an inception of -3 hours (minus a jitter between 0 and 18 hours) +Create RRSIGs that have an inception of -3 hours (minus a jitter between 0 and 18 hours) and a expiration of +32 days for every given DNSKEY. .IP \(bu 4 +Add NSEC records for all names in the zone. The TTL for these is the negative cache TTL from the +SOA record. +.IP \(bu 4 Add or replace \fIall\fP apex CDS/CDNSKEY records with the ones derived from the given keys. For each key two CDS are created one with SHA1 and another with SHA256. .IP \(bu 4 @@ -54,8 +56,8 @@ overwrite \fIany\fP previous serial number. .PP -Thus there are two ways that dictate when a zone is signed. Normally every 6 days (plus jitter) it -will be resigned. If for some reason we fail this check, the 14 days before expiring kicks in. +There are two ways that dictate when a zone is signed. Normally every 6 days (plus jitter) it will +be resigned. If for some reason we fail this check, the 14 days before expiring kicks in. .PP Keys are named (following BIND9): \fB\fCK++.key\fR and \fB\fCK++.private\fR. @@ -200,8 +202,8 @@ This will lead to \fB\fCdb.example.org\fR be signed \fItwice\fP, as this entire you have specified the origins \fB\fCexample.org\fR and \fB\fCexample.net\fR in the server block. 
.PP -Forcibly resigning a zone can be accomplished by removing the signed zone file (CoreDNS will keep on -serving it from memory), and sending SIGUSR1 to the process to make it reload and resign the zone +Forcibly resigning a zone can be accomplished by removing the signed zone file (CoreDNS will keep +on serving it from memory), and sending SIGUSR1 to the process to make it reload and resign the zone file. .SH "ALSO SEE" @@ -210,11 +212,17 @@ The DNSSEC RFCs: RFC 4033, RFC 4034 and RFC 4035. And the BCP on DNSSEC, RFC 678 manual pages coredns-keygen(1) and dnssec-keygen(8). And the \fIfile\fP plugin's documentation. .PP -Coredns-keygen can be found at https://github.com/coredns/coredns-utils -\[la]https://github.com/coredns/coredns-utils\[ra] in the coredns-keygen directory. +Coredns-keygen can be found at +https://github.com/coredns/coredns-utils +\[la]https://github.com/coredns/coredns-utils\[ra] in the +coredns-keygen directory. + +.PP +Other useful DNSSEC tools can be found in ldns +\[la]https://nlnetlabs.nl/projects/ldns/about/\[ra], e.g. +\fB\fCldns-key2ds\fR to create DS records from DNSKEYs. .SH "BUGS" .PP -\fB\fCkeys directory\fR is not implemented. Glue records are currently signed, and no DS records are added -for child zones. +\fB\fCkeys directory\fR is not implemented. 
diff --git a/man/coredns-template.7 b/man/coredns-template.7 index 8ece38763bc..9f18b749629 100644 --- a/man/coredns-template.7 +++ b/man/coredns-template.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-TEMPLATE" 7 "October 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-TEMPLATE" 7 "December 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-tls.7 b/man/coredns-tls.7 index 1098a50ec5e..ef72fb1d98c 100644 --- a/man/coredns-tls.7 +++ b/man/coredns-tls.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-TLS" 7 "October 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-TLS" 7 "December 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-trace.7 b/man/coredns-trace.7 index eb5391ebf3e..bf58f3e9e1a 100644 --- a/man/coredns-trace.7 +++ b/man/coredns-trace.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-TRACE" 7 "August 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-TRACE" 7 "December 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP @@ -41,9 +41,9 @@ Additional features can be enabled with this syntax: .nf trace [ENDPOINT\-TYPE] [ENDPOINT] { - every AMOUNT - service NAME - client\_server + every AMOUNT + service NAME + client\_server } .fi @@ -132,9 +132,9 @@ Trace one query every 10000 queries, rename the service, and enable same span: .nf trace tracinghost:9411 { - every 10000 - service dnsproxy - client\_server + every 10000 + service dnsproxy + client\_server } .fi diff --git a/man/coredns-transfer.7 b/man/coredns-transfer.7 index 96091556c7d..6bb47364269 100644 --- a/man/coredns-transfer.7 +++ b/man/coredns-transfer.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-TRANSFER" 7 "November 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-TRANSFER" 7 "December 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP diff --git a/man/coredns-whoami.7 b/man/coredns-whoami.7 index 
f71cd891fad..860a4230c64 100644 --- a/man/coredns-whoami.7 +++ b/man/coredns-whoami.7 @@ -1,5 +1,5 @@ .\" Generated by Mmark Markdown Processer - mmark.miek.nl -.TH "COREDNS-WHOAMI" 7 "September 2019" "CoreDNS" "CoreDNS Plugins" +.TH "COREDNS-WHOAMI" 7 "December 2019" "CoreDNS" "CoreDNS Plugins" .SH "NAME" .PP From 66f1659d0952807f6b090992ac61232e2d8f9a57 Mon Sep 17 00:00:00 2001 From: Yong Tang Date: Wed, 11 Dec 2019 08:55:08 -0800 Subject: [PATCH 311/324] Add release note for v1.6.6 (#3531) * Add release note for v1.6.6 Signed-off-by: Yong Tang * Update release note to cover review comments Signed-off-by: Yong Tang --- notes/coredns-1.6.6.md | 50 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 50 insertions(+) create mode 100644 notes/coredns-1.6.6.md diff --git a/notes/coredns-1.6.6.md b/notes/coredns-1.6.6.md new file mode 100644 index 00000000000..d0d78293c13 --- /dev/null +++ b/notes/coredns-1.6.6.md @@ -0,0 +1,50 @@ ++++ +title = "CoreDNS-1.6.6 Release" +description = "CoreDNS-1.6.6 Release Notes." +tags = ["Release", "1.6.6", "Notes"] +release = "1.6.6" +date = 2019-12-11T10:00:00+00:00 +author = "coredns" ++++ + +The CoreDNS team has released +[CoreDNS-1.6.6](https://github.com/coredns/coredns/releases/tag/v1.6.6). + +A fairly small release that polishes various plugins and fixes a bunch of bugs. + + +# Security + +The github.com/miekg/dns has been updated to v1.1.25 to fix a DNS related security +vulnerability (https://github.com/miekg/dns/issues/1043). + +# Plugins + +A new plugin [*bufsize*](/plugins/bufsize) has been added that prevents IP fragmentation +for the DNS Flag Day 2020 and to deal with DNS vulnerabilities. + +* [*cache*](/plugin/cache) added a `serve_stale` option similar to `unbound`'s `serve_expired`. +* [*sign*](/plugin/sign) fix signing of authoritative data that we are not authoritative for. +* [*transfer*](/plugin/transfer) fixed calling wg.Add in main goroutine to avoid race conditons. 
+ +## Brought to You By + +Chris O'Haver +Gonzalo Paniagua Javier +Guangming Wang +Kohei Yoshida +Miciah Dashiel Butler Masters +Miek Gieben +Yong Tang +Zou Nengren + +## Noteworthy Changes + +* plugin/bufsize: A new bufsize plugin to prevent IP fragmentation and DNS Flag Day 2020 (https://github.com/coredns/coredns/pull/3401) +* plugin/transfer: Fixed calling wg.Add in main goroutine to avoid race conditions (https://github.com/coredns/coredns/pull/3433) +* plugin/pprof: Fixed a reloading issue (https://github.com/coredns/coredns/pull/3454) +* plugin/health: Fixed a reloading issue (https://github.com/coredns/coredns/pull/3473) +* plugin/redy: Fixed a reloading issue (https://github.com/coredns/coredns/pull/3473) +* plugin/cache: Added a `serve_stale` option similar to `unbound`'s `serve_expired` (https://github.com/coredns/coredns/pull/3468) +* plugin/sign: Fix signing of authoritative data (https://github.com/coredns/coredns/pull/3479) +* pkg/reuseport: Move the core server listening functions to a new package (https://github.com/coredns/coredns/pull/3455) From 6a7a75e0cc14159177e604d0157836cc32add343 Mon Sep 17 00:00:00 2001 From: Kohei Yoshida <14937183+ykhr53@users.noreply.github.com> Date: Wed, 11 Dec 2019 19:16:53 +0000 Subject: [PATCH 312/324] fixed typo (#3532) Signed-off-by: ykhr53 --- notes/coredns-1.6.6.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/notes/coredns-1.6.6.md b/notes/coredns-1.6.6.md index d0d78293c13..d29816c13fe 100644 --- a/notes/coredns-1.6.6.md +++ b/notes/coredns-1.6.6.md @@ -20,7 +20,7 @@ vulnerability (https://github.com/miekg/dns/issues/1043). # Plugins -A new plugin [*bufsize*](/plugins/bufsize) has been added that prevents IP fragmentation +A new plugin [*bufsize*](/plugin/bufsize) has been added that prevents IP fragmentation for the DNS Flag Day 2020 and to deal with DNS vulnerabilities. 
* [*cache*](/plugin/cache) added a `serve_stale` option similar to `unbound`'s `serve_expired`. From ce59f0f55a90b0c1151c97d56b94910a2761e584 Mon Sep 17 00:00:00 2001 From: Benjamin Bennett Date: Tue, 9 Jul 2019 15:38:18 -0400 Subject: [PATCH 313/324] UPSTREAM: : openshift: Changed the OWNERS to OpenShift ones 1. Why is this pull request needed and what does it do? Put OpenShift users in the OWNERS file so that we can maintain this fork. --- OWNERS | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) create mode 100644 OWNERS diff --git a/OWNERS b/OWNERS new file mode 100644 index 00000000000..352c1475c29 --- /dev/null +++ b/OWNERS @@ -0,0 +1,23 @@ +approvers: + - knobunc + - smarterclayton + - ironcladlou + - danehans + - Miciah + +features: + - comments + - reviewers + - aliases + - branches + - exec + +aliases: + - | + /plugin: (.*) -> /label add: plugin/$1 + - | + /approve -> /lgtm + - | + /wai -> /label add: works as intended + - | + /release: (.*) -> /exec: /opt/bin/release-coredns $1 From b65c0ca72f7b6cf4632a4fe25f65a0d3e308fb35 Mon Sep 17 00:00:00 2001 From: Dan Mace Date: Wed, 29 Aug 2018 16:04:26 -0400 Subject: [PATCH 314/324] UPSTREAM: : openshift: Add a product build pipeline Dockerfile --- images/coredns/Dockerfile | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) create mode 100644 images/coredns/Dockerfile diff --git a/images/coredns/Dockerfile b/images/coredns/Dockerfile new file mode 100644 index 00000000000..b395be4cee3 --- /dev/null +++ b/images/coredns/Dockerfile @@ -0,0 +1,22 @@ +FROM centos:7 + +RUN yum install -y golang + +ENV GOPATH /go +RUN mkdir $GOPATH + +COPY . $GOPATH/src/github.com/coredns/coredns + +RUN cd $GOPATH/src/github.com/coredns/coredns \ + && go build -o coredns . 
\ + && cp $GOPATH/src/github.com/coredns/coredns/coredns /usr/bin/ + +RUN yum remove -y golang + +ENTRYPOINT ["/usr/bin/coredns"] + +USER 1001 + +LABEL io.k8s.display-name="CoreDNS" \ + io.k8s.description="CoreDNS is a DNS server that chains plugins" \ + maintainer="coredns-discuss@googlegroups.com" From 394b61745c744fccaabc592594054a3a6428f2ef Mon Sep 17 00:00:00 2001 From: Dan Mace Date: Thu, 6 Sep 2018 17:11:45 -0400 Subject: [PATCH 315/324] UPSTREAM: : openshift: Update metadata --- images/coredns/Dockerfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/images/coredns/Dockerfile b/images/coredns/Dockerfile index b395be4cee3..b94a2657894 100644 --- a/images/coredns/Dockerfile +++ b/images/coredns/Dockerfile @@ -18,5 +18,5 @@ ENTRYPOINT ["/usr/bin/coredns"] USER 1001 LABEL io.k8s.display-name="CoreDNS" \ - io.k8s.description="CoreDNS is a DNS server that chains plugins" \ - maintainer="coredns-discuss@googlegroups.com" + io.k8s.description="CoreDNS delivers the DNS and Discovery Service for a Kubernetes cluster." \ + maintainer="aos-devel@redhat.com" From f77295fe61f5d769d14676e7402f62a4b6a03ffa Mon Sep 17 00:00:00 2001 From: Dan Mace Date: Fri, 7 Sep 2018 10:30:58 -0400 Subject: [PATCH 316/324] UPSTREAM: : openshift: Use multistage builds --- images/coredns/Dockerfile | 24 ++++++++---------------- 1 file changed, 8 insertions(+), 16 deletions(-) diff --git a/images/coredns/Dockerfile b/images/coredns/Dockerfile index b94a2657894..e9ccc766362 100644 --- a/images/coredns/Dockerfile +++ b/images/coredns/Dockerfile @@ -1,22 +1,14 @@ -FROM centos:7 - -RUN yum install -y golang - -ENV GOPATH /go -RUN mkdir $GOPATH - -COPY . $GOPATH/src/github.com/coredns/coredns +FROM openshift/origin-release:golang-1.10 as build +WORKDIR /go/src/github.com/coredns/coredns +ENV GOPATH=/go +COPY . . +RUN go build -o coredns . -RUN cd $GOPATH/src/github.com/coredns/coredns \ - && go build -o coredns . 
\ - && cp $GOPATH/src/github.com/coredns/coredns/coredns /usr/bin/ - -RUN yum remove -y golang +FROM centos:7 +COPY --from=build /go/src/github.com/coredns/coredns/coredns /usr/bin/coredns ENTRYPOINT ["/usr/bin/coredns"] -USER 1001 - LABEL io.k8s.display-name="CoreDNS" \ io.k8s.description="CoreDNS delivers the DNS and Discovery Service for a Kubernetes cluster." \ - maintainer="aos-devel@redhat.com" + maintainer="dev@lists.openshift.redhat.com" From 40c11e7a73ecb72d13b0067d9fa57d296d72e8c8 Mon Sep 17 00:00:00 2001 From: Dan Mace Date: Tue, 13 Nov 2018 10:09:02 -0500 Subject: [PATCH 317/324] UPSTREAM: : openshift: Add a RHEL7 Dockerfile and standardize format --- .dockerignore | 2 -- Dockerfile.openshift | 13 +++++++++++++ .../Dockerfile => Dockerfile.openshift.rhel7 | 7 +++---- 3 files changed, 16 insertions(+), 6 deletions(-) delete mode 100644 .dockerignore create mode 100644 Dockerfile.openshift rename images/coredns/Dockerfile => Dockerfile.openshift.rhel7 (60%) diff --git a/.dockerignore b/.dockerignore deleted file mode 100644 index 1ff16c577e9..00000000000 --- a/.dockerignore +++ /dev/null @@ -1,2 +0,0 @@ -* -!coredns diff --git a/Dockerfile.openshift b/Dockerfile.openshift new file mode 100644 index 00000000000..621cbb5d673 --- /dev/null +++ b/Dockerfile.openshift @@ -0,0 +1,13 @@ +FROM registry.svc.ci.openshift.org/openshift/release:golang-1.10 AS builder +WORKDIR /go/src/github.com/coredns/coredns +COPY . . +RUN go build -o coredns . + +FROM registry.svc.ci.openshift.org/openshift/origin-v4.0:base +COPY --from=builder /go/src/github.com/coredns/coredns/coredns /usr/bin/coredns + +ENTRYPOINT ["/usr/bin/coredns"] + +LABEL io.k8s.display-name="CoreDNS" \ + io.k8s.description="CoreDNS delivers the DNS and Discovery Service for a Kubernetes cluster." 
\ + maintainer="dev@lists.openshift.redhat.com" diff --git a/images/coredns/Dockerfile b/Dockerfile.openshift.rhel7 similarity index 60% rename from images/coredns/Dockerfile rename to Dockerfile.openshift.rhel7 index e9ccc766362..4be58f6357d 100644 --- a/images/coredns/Dockerfile +++ b/Dockerfile.openshift.rhel7 @@ -1,11 +1,10 @@ -FROM openshift/origin-release:golang-1.10 as build +FROM registry.svc.ci.openshift.org/ocp/builder:golang-1.10 AS builder WORKDIR /go/src/github.com/coredns/coredns -ENV GOPATH=/go COPY . . RUN go build -o coredns . -FROM centos:7 -COPY --from=build /go/src/github.com/coredns/coredns/coredns /usr/bin/coredns +FROM registry.svc.ci.openshift.org/ocp/4.0:base +COPY --from=builder /go/src/github.com/coredns/coredns/coredns /usr/bin/coredns ENTRYPOINT ["/usr/bin/coredns"] From 73e0eb3797b4588b5e9edc79851a312ad1c03cbe Mon Sep 17 00:00:00 2001 From: Dan Mace Date: Tue, 13 Nov 2018 11:13:55 -0500 Subject: [PATCH 318/324] UPSTREAM: : openshift: Optimize COPY directive --- Dockerfile.openshift | 2 +- Dockerfile.openshift.rhel7 | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Dockerfile.openshift b/Dockerfile.openshift index 621cbb5d673..73eb839c8f2 100644 --- a/Dockerfile.openshift +++ b/Dockerfile.openshift @@ -4,7 +4,7 @@ COPY . . RUN go build -o coredns . FROM registry.svc.ci.openshift.org/openshift/origin-v4.0:base -COPY --from=builder /go/src/github.com/coredns/coredns/coredns /usr/bin/coredns +COPY --from=builder /go/src/github.com/coredns/coredns/coredns /usr/bin/ ENTRYPOINT ["/usr/bin/coredns"] diff --git a/Dockerfile.openshift.rhel7 b/Dockerfile.openshift.rhel7 index 4be58f6357d..14539b7969f 100644 --- a/Dockerfile.openshift.rhel7 +++ b/Dockerfile.openshift.rhel7 @@ -4,7 +4,7 @@ COPY . . RUN go build -o coredns . 
FROM registry.svc.ci.openshift.org/ocp/4.0:base -COPY --from=builder /go/src/github.com/coredns/coredns/coredns /usr/bin/coredns +COPY --from=builder /go/src/github.com/coredns/coredns/coredns /usr/bin/ ENTRYPOINT ["/usr/bin/coredns"] From a6e80500e6504b960445e406869e9c86bf1a2316 Mon Sep 17 00:00:00 2001 From: Ben Browning Date: Tue, 5 Feb 2019 20:03:39 -0500 Subject: [PATCH 319/324] UPSTREAM: : openshift: Add a `make test` target Previous coredns versions had a `make test` target and our ci-operator currently uses this target to kick off the tests. So, at least to get tests running against this PR without requiring changes to the openshift/release ci-operator setup, I'm adding that target back. This just runs the full litany of tests that Travis would run except for the coverage targets and the tests that require a running etcd server. --- Makefile | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/Makefile b/Makefile index e82ffc7e96d..161997ee36d 100644 --- a/Makefile +++ b/Makefile @@ -78,3 +78,19 @@ presubmit: clean: go clean rm -f coredns + +.PHONY: dep-ensure +dep-ensure: + dep version || go get -u github.com/golang/dep/cmd/dep + dep ensure -v + dep prune -v + find vendor -name '*_test.go' -delete + +.PHONY: test +test: check + ( cd request ; go test -v -race ./... ) + ( cd core ; go test -v -race ./... ) + ( cd coremain ; go test -v -race ./... ) + ( cd test ; go test -v -race ./... ) + ( cd plugin ; go test -v -race ./... 
) + From 973ad68606137bbcc7ac55e3c827075f49408401 Mon Sep 17 00:00:00 2001 From: Antoni Segura Puimedon Date: Thu, 4 Jul 2019 03:07:25 +0200 Subject: [PATCH 320/324] UPSTREAM: : openshift: Set Dockerfiles to use vendor --- Dockerfile.openshift | 4 ++-- Dockerfile.openshift.rhel7 | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Dockerfile.openshift b/Dockerfile.openshift index 73eb839c8f2..d4f5ab3c3da 100644 --- a/Dockerfile.openshift +++ b/Dockerfile.openshift @@ -1,7 +1,7 @@ -FROM registry.svc.ci.openshift.org/openshift/release:golang-1.10 AS builder +FROM registry.svc.ci.openshift.org/openshift/release:golang-1.12 AS builder WORKDIR /go/src/github.com/coredns/coredns COPY . . -RUN go build -o coredns . +RUN GO111MODULE=on GOFLAGS=-mod=vendor go build -o coredns . FROM registry.svc.ci.openshift.org/openshift/origin-v4.0:base COPY --from=builder /go/src/github.com/coredns/coredns/coredns /usr/bin/ diff --git a/Dockerfile.openshift.rhel7 b/Dockerfile.openshift.rhel7 index 14539b7969f..682a2cdb484 100644 --- a/Dockerfile.openshift.rhel7 +++ b/Dockerfile.openshift.rhel7 @@ -1,7 +1,7 @@ -FROM registry.svc.ci.openshift.org/ocp/builder:golang-1.10 AS builder +FROM registry.svc.ci.openshift.org/ocp/builder:golang-1.12 AS builder WORKDIR /go/src/github.com/coredns/coredns COPY . . -RUN go build -o coredns . +RUN GO111MODULE=on GOFLAGS=-mod=vendor go build -o coredns . FROM registry.svc.ci.openshift.org/ocp/4.0:base COPY --from=builder /go/src/github.com/coredns/coredns/coredns /usr/bin/ From 07acbeb99fcd8c2ccdb6fc84ddb3d74d13e6ea9f Mon Sep 17 00:00:00 2001 From: "Brad P. Crochet" Date: Wed, 22 Jan 2020 13:50:32 +0100 Subject: [PATCH 321/324] UPSTREAM: : openshift: Make coredns exclusion more specific The coredns exclusion in .gitignore was ignoring anything that was named coredns. This patch makes it specifically just the binary in the main dir. 
--- .gitignore | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index ec03a44a4aa..e4585f723b3 100644 --- a/.gitignore +++ b/.gitignore @@ -1,7 +1,7 @@ query.log Corefile *.swp -coredns +^coredns$ coredns.exe coredns.exe~ kubectl From c2d922636c6234174f16653e4547ada0c1372485 Mon Sep 17 00:00:00 2001 From: Antoni Segura Puimedon Date: Wed, 10 Jul 2019 16:18:09 +0200 Subject: [PATCH 322/324] UPSTREAM: : openshift: Add github.com/openshift/coredns-mdns plugin This plugin allows for discovering hosts via mDNS browsing, which means that those nodes are then exposed via normal unicast DNS. --- core/dnsserver/zdirectives.go | 1 + core/plugin/zplugin.go | 1 + plugin.cfg | 1 + plugin/chaos/zowners.go | 3 ++- 4 files changed, 5 insertions(+), 1 deletion(-) diff --git a/core/dnsserver/zdirectives.go b/core/dnsserver/zdirectives.go index 61d96c6339f..d724696486c 100644 --- a/core/dnsserver/zdirectives.go +++ b/core/dnsserver/zdirectives.go @@ -32,6 +32,7 @@ var Directives = []string{ "chaos", "loadbalance", "cache", + "mdns", "rewrite", "dnssec", "autopath", diff --git a/core/plugin/zplugin.go b/core/plugin/zplugin.go index 90267f29bc4..c7be342edc4 100644 --- a/core/plugin/zplugin.go +++ b/core/plugin/zplugin.go @@ -49,4 +49,5 @@ import ( _ "github.com/coredns/coredns/plugin/transfer" _ "github.com/coredns/coredns/plugin/whoami" _ "github.com/coredns/federation" + _ "github.com/openshift/coredns-mdns" ) diff --git a/plugin.cfg b/plugin.cfg index 3f3dd85fd67..740d9bbaf7d 100644 --- a/plugin.cfg +++ b/plugin.cfg @@ -41,6 +41,7 @@ any:any chaos:chaos loadbalance:loadbalance cache:cache +mdns:github.com/openshift/coredns-mdns rewrite:rewrite dnssec:dnssec autopath:autopath diff --git a/plugin/chaos/zowners.go b/plugin/chaos/zowners.go index d7ec383dbad..60645e53ed5 100644 --- a/plugin/chaos/zowners.go +++ b/plugin/chaos/zowners.go @@ -1,4 +1,5 @@ package chaos // Owners are all GitHub handlers 
of all maintainers. -var Owners = []string{"bradbeam", "chrisohaver", "darshanime", "dilyevsky", "ekleiner", "fastest963", "greenpau", "grobie", "ihac", "inigohu", "isolus", "johnbelamaric", "miekg", "nchrisdk", "nitisht", "pmoroney", "rajansandeep", "rdrozhdzh", "rtreffer", "stp-ip", "superq", "varyoo", "ykhr53", "yongtang"} +var Owners = []string{"Miciah", "bradbeam", "chrisohaver", "danehans", "dilyevsky", "ekleiner", "fastest963", "greenpau", "grobie", "inigohu", "ironcladlou", "isolus", "johnbelamaric", "knobunc", "miekg", "nchrisdk", "nitisht", "pmoroney", "rajansandeep", "rdrozhdzh", "rtreffer", "smarterclayton", "superq", "varyoo", "yongtang"} + From 5e1e5be0edcaf15d24811e8cf791684263e09e50 Mon Sep 17 00:00:00 2001 From: "Brad P. Crochet" Date: Thu, 16 Jan 2020 14:47:39 -0500 Subject: [PATCH 323/324] UPSTREAM: : openshift: Fix HostPortOrFile to support IPv6 addresses with zone (#3527) 1. The HostPortOrFile tests don't have any IPv6 tests. This adds some. 2. The HostPortOrFile breaks if any of the addresses have IPv6 zone defined. ParseIP does not handle %zone anymore. Signed-off-by: Brad P. Crochet --- plugin/pkg/parse/host.go | 19 +++++++++++++++---- plugin/pkg/parse/host_test.go | 20 ++++++++++++++++++++ 2 files changed, 35 insertions(+), 4 deletions(-) diff --git a/plugin/pkg/parse/host.go b/plugin/pkg/parse/host.go index 87177125fb1..c1b7d23e0fb 100644 --- a/plugin/pkg/parse/host.go +++ b/plugin/pkg/parse/host.go @@ -4,12 +4,23 @@ import ( "fmt" "net" "os" + "strings" "github.com/coredns/coredns/plugin/pkg/transport" "github.com/miekg/dns" ) +// Strips the zone, but preserves any port that comes after the zone +func stripZone(host string) string { + if strings.Contains(host, "%") { + lastPercent := strings.LastIndex(host, "%") + newHost := host[:lastPercent] + return newHost + } + return host +} + // HostPortOrFile parses the strings in s, each string can either be a // address, [scheme://]address:port or a filename. 
The address part is checked // and in case of filename a resolv.conf like file is (assumed) and parsed and @@ -21,10 +32,11 @@ func HostPortOrFile(s ...string) ([]string, error) { trans, host := Transport(h) addr, _, err := net.SplitHostPort(host) + if err != nil { // Parse didn't work, it is not a addr:port combo - if net.ParseIP(host) == nil { - // Not an IP address. + hostNoZone := stripZone(host) + if net.ParseIP(hostNoZone) == nil { ss, err := tryFile(host) if err == nil { servers = append(servers, ss...) @@ -47,8 +59,7 @@ func HostPortOrFile(s ...string) ([]string, error) { continue } - if net.ParseIP(addr) == nil { - // Not an IP address. + if net.ParseIP(stripZone(addr)) == nil { ss, err := tryFile(host) if err == nil { servers = append(servers, ss...) diff --git a/plugin/pkg/parse/host_test.go b/plugin/pkg/parse/host_test.go index f6e771f29c3..1c23c5beed1 100644 --- a/plugin/pkg/parse/host_test.go +++ b/plugin/pkg/parse/host_test.go @@ -34,6 +34,26 @@ func TestHostPortOrFile(t *testing.T) { "127.0.0.1:53", false, }, + { + "fe80::1", + "[fe80::1]:53", + false, + }, + { + "fe80::1%ens3", + "[fe80::1%ens3]:53", + false, + }, + { + "[fd01::1]:153", + "[fd01::1]:153", + false, + }, + { + "[fd01::1%ens3]:153", + "[fd01::1%ens3]:153", + false, + }, } err := ioutil.WriteFile("resolv.conf", []byte("nameserver 127.0.0.1\n"), 0600) From 358bd8f4e2df9669984c7ac8fcfd8ba5b6b22cfc Mon Sep 17 00:00:00 2001 From: "Brad P. 
Crochet" Date: Wed, 22 Jan 2020 13:06:30 +0100 Subject: [PATCH 324/324] UPSTREAM: : openshift: Vendor required modules --- go.mod | 21 +- go.sum | 7 + vendor/cloud.google.com/go/LICENSE | 202 + .../go/compute/metadata/metadata.go | 513 + .../github.com/Azure/azure-sdk-for-go/LICENSE | 202 + .../github.com/Azure/azure-sdk-for-go/NOTICE | 5 + .../profiles/latest/dns/mgmt/dns/models.go | 138 + .../dns/mgmt/2018-05-01/dns/client.go | 51 + .../dns/mgmt/2018-05-01/dns/models.go | 943 + .../dns/mgmt/2018-05-01/dns/recordsets.go | 779 + .../mgmt/2018-05-01/dns/resourcereference.go | 117 + .../dns/mgmt/2018-05-01/dns/version.go | 30 + .../services/dns/mgmt/2018-05-01/dns/zones.go | 611 + .../Azure/azure-sdk-for-go/version/version.go | 21 + .../Azure/go-autorest/autorest/LICENSE | 191 + .../Azure/go-autorest/autorest/adal/LICENSE | 191 + .../Azure/go-autorest/autorest/adal/README.md | 292 + .../Azure/go-autorest/autorest/adal/config.go | 151 + .../go-autorest/autorest/adal/devicetoken.go | 269 + .../Azure/go-autorest/autorest/adal/go.mod | 12 + .../Azure/go-autorest/autorest/adal/go.sum | 23 + .../autorest/adal/go_mod_tidy_hack.go | 24 + .../go-autorest/autorest/adal/persist.go | 73 + .../Azure/go-autorest/autorest/adal/sender.go | 95 + .../Azure/go-autorest/autorest/adal/token.go | 1112 + .../go-autorest/autorest/adal/version.go | 45 + .../go-autorest/autorest/authorization.go | 336 + .../go-autorest/autorest/authorization_sas.go | 67 + .../autorest/authorization_storage.go | 301 + .../Azure/go-autorest/autorest/autorest.go | 150 + .../Azure/go-autorest/autorest/azure/async.go | 924 + .../go-autorest/autorest/azure/auth/LICENSE | 191 + .../go-autorest/autorest/azure/auth/auth.go | 737 + .../go-autorest/autorest/azure/auth/go.mod | 11 + .../go-autorest/autorest/azure/auth/go.sum | 36 + .../autorest/azure/auth/go_mod_tidy_hack.go | 24 + .../Azure/go-autorest/autorest/azure/azure.go | 335 + .../go-autorest/autorest/azure/cli/LICENSE | 191 + 
.../go-autorest/autorest/azure/cli/go.mod | 11 + .../go-autorest/autorest/azure/cli/go.sum | 29 + .../autorest/azure/cli/go_mod_tidy_hack.go | 24 + .../go-autorest/autorest/azure/cli/profile.go | 83 + .../go-autorest/autorest/azure/cli/token.go | 175 + .../autorest/azure/environments.go | 244 + .../autorest/azure/metadata_environment.go | 245 + .../Azure/go-autorest/autorest/azure/rp.go | 204 + .../Azure/go-autorest/autorest/client.go | 300 + .../Azure/go-autorest/autorest/date/LICENSE | 191 + .../Azure/go-autorest/autorest/date/date.go | 96 + .../Azure/go-autorest/autorest/date/go.mod | 5 + .../Azure/go-autorest/autorest/date/go.sum | 16 + .../autorest/date/go_mod_tidy_hack.go | 24 + .../Azure/go-autorest/autorest/date/time.go | 103 + .../go-autorest/autorest/date/timerfc1123.go | 100 + .../go-autorest/autorest/date/unixtime.go | 123 + .../go-autorest/autorest/date/utility.go | 25 + .../Azure/go-autorest/autorest/error.go | 98 + .../Azure/go-autorest/autorest/go.mod | 11 + .../Azure/go-autorest/autorest/go.sum | 30 + .../Azure/go-autorest/autorest/preparer.go | 550 + .../Azure/go-autorest/autorest/responder.go | 269 + .../go-autorest/autorest/retriablerequest.go | 52 + .../autorest/retriablerequest_1.7.go | 54 + .../autorest/retriablerequest_1.8.go | 66 + .../Azure/go-autorest/autorest/sender.go | 407 + .../Azure/go-autorest/autorest/to/LICENSE | 191 + .../Azure/go-autorest/autorest/to/convert.go | 152 + .../Azure/go-autorest/autorest/to/go.mod | 3 + .../Azure/go-autorest/autorest/utility.go | 228 + .../Azure/go-autorest/autorest/version.go | 41 + .../Azure/go-autorest/logger/LICENSE | 191 + .../Azure/go-autorest/logger/go.mod | 3 + .../Azure/go-autorest/logger/logger.go | 328 + .../Azure/go-autorest/tracing/LICENSE | 191 + .../Azure/go-autorest/tracing/go.mod | 3 + .../Azure/go-autorest/tracing/tracing.go | 67 + .../github.com/DataDog/datadog-go/LICENSE.txt | 19 + .../DataDog/datadog-go/statsd/README.md | 5 + .../DataDog/datadog-go/statsd/options.go | 109 
+ .../DataDog/datadog-go/statsd/statsd.go | 757 + .../DataDog/datadog-go/statsd/udp.go | 73 + .../DataDog/datadog-go/statsd/uds.go | 11 + .../DataDog/datadog-go/statsd/uds_async.go | 113 + .../DataDog/datadog-go/statsd/uds_blocking.go | 92 + vendor/github.com/DataDog/zstd/.travis.yml | 30 + vendor/github.com/DataDog/zstd/LICENSE | 27 + vendor/github.com/DataDog/zstd/README.md | 120 + vendor/github.com/DataDog/zstd/ZSTD_LICENSE | 30 + vendor/github.com/DataDog/zstd/bitstream.h | 471 + vendor/github.com/DataDog/zstd/compiler.h | 111 + vendor/github.com/DataDog/zstd/cover.c | 1048 + vendor/github.com/DataDog/zstd/cpu.h | 216 + vendor/github.com/DataDog/zstd/divsufsort.c | 1913 + vendor/github.com/DataDog/zstd/divsufsort.h | 67 + .../github.com/DataDog/zstd/entropy_common.c | 221 + .../github.com/DataDog/zstd/error_private.c | 48 + .../github.com/DataDog/zstd/error_private.h | 76 + vendor/github.com/DataDog/zstd/errors.go | 35 + vendor/github.com/DataDog/zstd/fse.h | 704 + vendor/github.com/DataDog/zstd/fse_compress.c | 849 + .../github.com/DataDog/zstd/fse_decompress.c | 309 + vendor/github.com/DataDog/zstd/huf.h | 327 + vendor/github.com/DataDog/zstd/huf_compress.c | 788 + .../github.com/DataDog/zstd/huf_decompress.c | 1096 + vendor/github.com/DataDog/zstd/mem.h | 362 + vendor/github.com/DataDog/zstd/pool.c | 283 + vendor/github.com/DataDog/zstd/pool.h | 74 + vendor/github.com/DataDog/zstd/threading.c | 75 + vendor/github.com/DataDog/zstd/threading.h | 123 + .../github.com/DataDog/zstd/travis_test_32.sh | 18 + vendor/github.com/DataDog/zstd/update.txt | 56 + vendor/github.com/DataDog/zstd/xxhash.c | 875 + vendor/github.com/DataDog/zstd/xxhash.h | 305 + vendor/github.com/DataDog/zstd/zbuff.h | 213 + 
vendor/github.com/DataDog/zstd/zbuff_common.c | 26 + .../github.com/DataDog/zstd/zbuff_compress.c | 147 + .../DataDog/zstd/zbuff_decompress.c | 75 + vendor/github.com/DataDog/zstd/zdict.c | 1108 + vendor/github.com/DataDog/zstd/zdict.h | 212 + vendor/github.com/DataDog/zstd/zstd.go | 144 + vendor/github.com/DataDog/zstd/zstd.h | 1399 + vendor/github.com/DataDog/zstd/zstd_common.c | 86 + .../github.com/DataDog/zstd/zstd_compress.c | 3449 + .../DataDog/zstd/zstd_compress_internal.h | 709 + .../github.com/DataDog/zstd/zstd_decompress.c | 3003 + .../DataDog/zstd/zstd_double_fast.c | 327 + .../DataDog/zstd/zstd_double_fast.h | 36 + vendor/github.com/DataDog/zstd/zstd_errors.h | 92 + vendor/github.com/DataDog/zstd/zstd_fast.c | 259 + vendor/github.com/DataDog/zstd/zstd_fast.h | 35 + .../github.com/DataDog/zstd/zstd_internal.h | 290 + vendor/github.com/DataDog/zstd/zstd_lazy.c | 824 + vendor/github.com/DataDog/zstd/zstd_lazy.h | 56 + vendor/github.com/DataDog/zstd/zstd_ldm.c | 653 + vendor/github.com/DataDog/zstd/zstd_ldm.h | 111 + vendor/github.com/DataDog/zstd/zstd_legacy.h | 381 + vendor/github.com/DataDog/zstd/zstd_opt.c | 923 + vendor/github.com/DataDog/zstd/zstd_opt.h | 42 + vendor/github.com/DataDog/zstd/zstd_stream.go | 291 + vendor/github.com/DataDog/zstd/zstd_v01.c | 2127 + vendor/github.com/DataDog/zstd/zstd_v01.h | 89 + vendor/github.com/DataDog/zstd/zstd_v02.c | 3483 + vendor/github.com/DataDog/zstd/zstd_v02.h | 88 + vendor/github.com/DataDog/zstd/zstd_v03.c | 3124 + vendor/github.com/DataDog/zstd/zstd_v03.h | 88 + vendor/github.com/DataDog/zstd/zstd_v04.c | 3677 ++ vendor/github.com/DataDog/zstd/zstd_v04.h | 137 + vendor/github.com/DataDog/zstd/zstd_v05.c | 4011 ++ vendor/github.com/DataDog/zstd/zstd_v05.h | 157 + 
vendor/github.com/DataDog/zstd/zstd_v06.c | 4124 ++ vendor/github.com/DataDog/zstd/zstd_v06.h | 167 + vendor/github.com/DataDog/zstd/zstd_v07.c | 4502 ++ vendor/github.com/DataDog/zstd/zstd_v07.h | 182 + .../github.com/DataDog/zstd/zstdmt_compress.c | 1831 + .../github.com/DataDog/zstd/zstdmt_compress.h | 156 + vendor/github.com/Shopify/sarama/.gitignore | 27 + vendor/github.com/Shopify/sarama/.travis.yml | 35 + vendor/github.com/Shopify/sarama/CHANGELOG.md | 683 + vendor/github.com/Shopify/sarama/LICENSE | 20 + vendor/github.com/Shopify/sarama/Makefile | 32 + vendor/github.com/Shopify/sarama/README.md | 39 + vendor/github.com/Shopify/sarama/Vagrantfile | 20 + .../github.com/Shopify/sarama/acl_bindings.go | 135 + .../Shopify/sarama/acl_create_request.go | 83 + .../Shopify/sarama/acl_create_response.go | 88 + .../Shopify/sarama/acl_delete_request.go | 57 + .../Shopify/sarama/acl_delete_response.go | 156 + .../Shopify/sarama/acl_describe_request.go | 34 + .../Shopify/sarama/acl_describe_response.go | 86 + .../github.com/Shopify/sarama/acl_filter.go | 78 + vendor/github.com/Shopify/sarama/acl_types.go | 54 + .../sarama/add_offsets_to_txn_request.go | 52 + .../sarama/add_offsets_to_txn_response.go | 44 + .../sarama/add_partitions_to_txn_request.go | 76 + .../sarama/add_partitions_to_txn_response.go | 108 + vendor/github.com/Shopify/sarama/admin.go | 614 + .../Shopify/sarama/alter_configs_request.go | 120 + .../Shopify/sarama/alter_configs_response.go | 95 + .../Shopify/sarama/api_versions_request.go | 24 + .../Shopify/sarama/api_versions_response.go | 87 + .../Shopify/sarama/async_producer.go | 1099 + .../Shopify/sarama/balance_strategy.go | 129 + vendor/github.com/Shopify/sarama/broker.go | 1102 + vendor/github.com/Shopify/sarama/client.go | 913 + vendor/github.com/Shopify/sarama/compress.go | 75 + 
vendor/github.com/Shopify/sarama/config.go | 616 + .../Shopify/sarama/config_resource_type.go | 15 + vendor/github.com/Shopify/sarama/consumer.go | 833 + .../Shopify/sarama/consumer_group.go | 780 + .../Shopify/sarama/consumer_group_members.go | 96 + .../sarama/consumer_metadata_request.go | 34 + .../sarama/consumer_metadata_response.go | 78 + .../github.com/Shopify/sarama/crc32_field.go | 69 + .../sarama/create_partitions_request.go | 121 + .../sarama/create_partitions_response.go | 94 + .../Shopify/sarama/create_topics_request.go | 174 + .../Shopify/sarama/create_topics_response.go | 112 + .../github.com/Shopify/sarama/decompress.go | 63 + .../Shopify/sarama/delete_groups_request.go | 30 + .../Shopify/sarama/delete_groups_response.go | 70 + .../Shopify/sarama/delete_records_request.go | 126 + .../Shopify/sarama/delete_records_response.go | 158 + .../Shopify/sarama/delete_topics_request.go | 48 + .../Shopify/sarama/delete_topics_response.go | 78 + .../sarama/describe_configs_request.go | 112 + .../sarama/describe_configs_response.go | 320 + .../Shopify/sarama/describe_groups_request.go | 30 + .../sarama/describe_groups_response.go | 187 + vendor/github.com/Shopify/sarama/dev.yml | 10 + .../Shopify/sarama/encoder_decoder.go | 89 + .../Shopify/sarama/end_txn_request.go | 50 + .../Shopify/sarama/end_txn_response.go | 44 + vendor/github.com/Shopify/sarama/errors.go | 329 + .../Shopify/sarama/fetch_request.go | 170 + .../Shopify/sarama/fetch_response.go | 409 + .../sarama/find_coordinator_request.go | 61 + .../sarama/find_coordinator_response.go | 92 + vendor/github.com/Shopify/sarama/go.mod | 13 + vendor/github.com/Shopify/sarama/go.sum | 18 + .../Shopify/sarama/heartbeat_request.go | 47 + .../Shopify/sarama/heartbeat_response.go | 32 + .../sarama/init_producer_id_request.go | 43 + .../sarama/init_producer_id_response.go | 55 + .../Shopify/sarama/join_group_request.go | 163 + 
.../Shopify/sarama/join_group_response.go | 135 + .../Shopify/sarama/leave_group_request.go | 40 + .../Shopify/sarama/leave_group_response.go | 32 + .../github.com/Shopify/sarama/length_field.go | 82 + .../Shopify/sarama/list_groups_request.go | 24 + .../Shopify/sarama/list_groups_response.go | 69 + vendor/github.com/Shopify/sarama/message.go | 165 + .../github.com/Shopify/sarama/message_set.go | 108 + .../Shopify/sarama/metadata_request.go | 88 + .../Shopify/sarama/metadata_response.go | 321 + vendor/github.com/Shopify/sarama/metrics.go | 51 + .../github.com/Shopify/sarama/mockbroker.go | 330 + .../Shopify/sarama/mockresponses.go | 868 + .../Shopify/sarama/offset_commit_request.go | 210 + .../Shopify/sarama/offset_commit_response.go | 110 + .../Shopify/sarama/offset_fetch_request.go | 100 + .../Shopify/sarama/offset_fetch_response.go | 197 + .../Shopify/sarama/offset_manager.go | 581 + .../Shopify/sarama/offset_request.go | 156 + .../Shopify/sarama/offset_response.go | 174 + .../Shopify/sarama/packet_decoder.go | 60 + .../Shopify/sarama/packet_encoder.go | 65 + .../github.com/Shopify/sarama/partitioner.go | 217 + .../github.com/Shopify/sarama/prep_encoder.go | 153 + .../Shopify/sarama/produce_request.go | 252 + .../Shopify/sarama/produce_response.go | 189 + .../github.com/Shopify/sarama/produce_set.go | 258 + .../github.com/Shopify/sarama/real_decoder.go | 324 + .../github.com/Shopify/sarama/real_encoder.go | 156 + vendor/github.com/Shopify/sarama/record.go | 113 + .../github.com/Shopify/sarama/record_batch.go | 213 + vendor/github.com/Shopify/sarama/records.go | 194 + vendor/github.com/Shopify/sarama/request.go | 151 + .../Shopify/sarama/response_header.go | 21 + vendor/github.com/Shopify/sarama/sarama.go | 99 + .../sarama/sasl_authenticate_request.go | 29 + .../sarama/sasl_authenticate_response.go | 44 + 
.../Shopify/sarama/sasl_handshake_request.go | 34 + .../Shopify/sarama/sasl_handshake_response.go | 38 + .../Shopify/sarama/sync_group_request.go | 100 + .../Shopify/sarama/sync_group_response.go | 41 + .../Shopify/sarama/sync_producer.go | 149 + vendor/github.com/Shopify/sarama/timestamp.go | 40 + .../sarama/txn_offset_commit_request.go | 126 + .../sarama/txn_offset_commit_response.go | 83 + vendor/github.com/Shopify/sarama/utils.go | 220 + vendor/github.com/Shopify/sarama/zstd_cgo.go | 13 + .../Shopify/sarama/zstd_fallback.go | 17 + vendor/github.com/apache/thrift/LICENSE | 239 + vendor/github.com/apache/thrift/NOTICE | 5 + .../lib/go/thrift/application_exception.go | 170 + .../thrift/lib/go/thrift/binary_protocol.go | 505 + .../lib/go/thrift/buffered_transport.go | 92 + .../apache/thrift/lib/go/thrift/client.go | 95 + .../thrift/lib/go/thrift/compact_protocol.go | 810 + .../apache/thrift/lib/go/thrift/context.go | 24 + .../thrift/lib/go/thrift/debug_protocol.go | 270 + .../thrift/lib/go/thrift/deserializer.go | 58 + .../apache/thrift/lib/go/thrift/exception.go | 44 + .../apache/thrift/lib/go/thrift/field.go | 79 + .../thrift/lib/go/thrift/framed_transport.go | 187 + .../thrift/lib/go/thrift/header_context.go | 101 + .../thrift/lib/go/thrift/header_protocol.go | 305 + .../thrift/lib/go/thrift/header_transport.go | 723 + .../thrift/lib/go/thrift/http_client.go | 242 + .../thrift/lib/go/thrift/http_transport.go | 63 + .../lib/go/thrift/iostream_transport.go | 214 + .../thrift/lib/go/thrift/json_protocol.go | 581 + .../thrift/lib/go/thrift/memory_buffer.go | 80 + .../thrift/lib/go/thrift/messagetype.go | 31 + .../lib/go/thrift/multiplexed_protocol.go | 170 + .../apache/thrift/lib/go/thrift/numeric.go | 164 + .../apache/thrift/lib/go/thrift/pointerize.go | 52 + .../thrift/lib/go/thrift/processor_factory.go | 70 + .../apache/thrift/lib/go/thrift/protocol.go | 177 + .../lib/go/thrift/protocol_exception.go | 77 + 
.../thrift/lib/go/thrift/protocol_factory.go | 25 + .../thrift/lib/go/thrift/rich_transport.go | 68 + .../apache/thrift/lib/go/thrift/serializer.go | 79 + .../apache/thrift/lib/go/thrift/server.go | 35 + .../thrift/lib/go/thrift/server_socket.go | 137 + .../thrift/lib/go/thrift/server_transport.go | 34 + .../lib/go/thrift/simple_json_protocol.go | 1338 + .../thrift/lib/go/thrift/simple_server.go | 278 + .../apache/thrift/lib/go/thrift/socket.go | 166 + .../thrift/lib/go/thrift/ssl_server_socket.go | 112 + .../apache/thrift/lib/go/thrift/ssl_socket.go | 176 + .../apache/thrift/lib/go/thrift/transport.go | 70 + .../lib/go/thrift/transport_exception.go | 90 + .../thrift/lib/go/thrift/transport_factory.go | 39 + .../apache/thrift/lib/go/thrift/type.go | 69 + .../thrift/lib/go/thrift/zlib_transport.go | 132 + vendor/github.com/aws/aws-sdk-go/LICENSE.txt | 202 + vendor/github.com/aws/aws-sdk-go/NOTICE.txt | 3 + .../aws/aws-sdk-go/aws/awserr/error.go | 164 + .../aws/aws-sdk-go/aws/awserr/types.go | 221 + .../aws/aws-sdk-go/aws/awsutil/copy.go | 108 + .../aws/aws-sdk-go/aws/awsutil/equal.go | 27 + .../aws/aws-sdk-go/aws/awsutil/path_value.go | 221 + .../aws/aws-sdk-go/aws/awsutil/prettify.go | 113 + .../aws-sdk-go/aws/awsutil/string_value.go | 88 + .../aws/aws-sdk-go/aws/client/client.go | 97 + .../aws-sdk-go/aws/client/default_retryer.go | 177 + .../aws/aws-sdk-go/aws/client/logger.go | 194 + .../aws/client/metadata/client_info.go | 14 + .../aws-sdk-go/aws/client/no_op_retryer.go | 28 + .../github.com/aws/aws-sdk-go/aws/config.go | 579 + .../aws/aws-sdk-go/aws/context_1_5.go | 37 + .../aws/aws-sdk-go/aws/context_1_9.go | 11 + .../aws-sdk-go/aws/context_background_1_5.go | 56 + .../aws-sdk-go/aws/context_background_1_7.go | 20 + .../aws/aws-sdk-go/aws/context_sleep.go | 24 + .../aws/aws-sdk-go/aws/convert_types.go | 918 + .../aws-sdk-go/aws/corehandlers/handlers.go | 230 + .../aws/corehandlers/param_validator.go | 17 + 
.../aws-sdk-go/aws/corehandlers/user_agent.go | 37 + .../aws/credentials/chain_provider.go | 100 + .../aws-sdk-go/aws/credentials/credentials.go | 299 + .../ec2rolecreds/ec2_role_provider.go | 180 + .../aws/credentials/endpointcreds/provider.go | 203 + .../aws/credentials/env_provider.go | 74 + .../aws-sdk-go/aws/credentials/example.ini | 12 + .../aws/credentials/processcreds/provider.go | 426 + .../shared_credentials_provider.go | 150 + .../aws/credentials/static_provider.go | 55 + .../stscreds/assume_role_provider.go | 312 + .../stscreds/web_identity_provider.go | 100 + .../github.com/aws/aws-sdk-go/aws/csm/doc.go | 69 + .../aws/aws-sdk-go/aws/csm/enable.go | 89 + .../aws/aws-sdk-go/aws/csm/metric.go | 109 + .../aws/aws-sdk-go/aws/csm/metric_chan.go | 55 + .../aws-sdk-go/aws/csm/metric_exception.go | 26 + .../aws/aws-sdk-go/aws/csm/reporter.go | 264 + .../aws/aws-sdk-go/aws/defaults/defaults.go | 207 + .../aws-sdk-go/aws/defaults/shared_config.go | 27 + vendor/github.com/aws/aws-sdk-go/aws/doc.go | 56 + .../aws/aws-sdk-go/aws/ec2metadata/api.go | 199 + .../aws/aws-sdk-go/aws/ec2metadata/service.go | 226 + .../aws/ec2metadata/token_provider.go | 92 + .../aws/aws-sdk-go/aws/endpoints/decode.go | 216 + .../aws/aws-sdk-go/aws/endpoints/defaults.go | 6159 ++ .../aws/endpoints/dep_service_ids.go | 141 + .../aws/aws-sdk-go/aws/endpoints/doc.go | 66 + .../aws/aws-sdk-go/aws/endpoints/endpoints.go | 564 + .../aws/endpoints/legacy_regions.go | 24 + .../aws/aws-sdk-go/aws/endpoints/v3model.go | 341 + .../aws/endpoints/v3model_codegen.go | 351 + .../github.com/aws/aws-sdk-go/aws/errors.go | 13 + .../aws/aws-sdk-go/aws/jsonvalue.go | 12 + .../github.com/aws/aws-sdk-go/aws/logger.go | 118 + .../aws/request/connection_reset_error.go | 18 + .../aws/aws-sdk-go/aws/request/handlers.go | 322 + .../aws-sdk-go/aws/request/http_request.go | 24 + .../aws-sdk-go/aws/request/offset_reader.go | 65 + .../aws/aws-sdk-go/aws/request/request.go | 678 + 
.../aws/aws-sdk-go/aws/request/request_1_7.go | 39 + .../aws/aws-sdk-go/aws/request/request_1_8.go | 36 + .../aws-sdk-go/aws/request/request_context.go | 14 + .../aws/request/request_context_1_6.go | 14 + .../aws/request/request_pagination.go | 266 + .../aws/aws-sdk-go/aws/request/retryer.go | 307 + .../aws/request/timeout_read_closer.go | 94 + .../aws/aws-sdk-go/aws/request/validation.go | 286 + .../aws/aws-sdk-go/aws/request/waiter.go | 295 + .../aws/session/cabundle_transport.go | 26 + .../aws/session/cabundle_transport_1_5.go | 22 + .../aws/session/cabundle_transport_1_6.go | 23 + .../aws/aws-sdk-go/aws/session/credentials.go | 259 + .../aws/aws-sdk-go/aws/session/doc.go | 245 + .../aws/aws-sdk-go/aws/session/env_config.go | 345 + .../aws/aws-sdk-go/aws/session/session.go | 728 + .../aws-sdk-go/aws/session/shared_config.go | 547 + .../aws-sdk-go/aws/signer/v4/header_rules.go | 82 + .../aws/aws-sdk-go/aws/signer/v4/options.go | 7 + .../aws/aws-sdk-go/aws/signer/v4/uri_path.go | 24 + .../aws/aws-sdk-go/aws/signer/v4/v4.go | 806 + vendor/github.com/aws/aws-sdk-go/aws/types.go | 207 + vendor/github.com/aws/aws-sdk-go/aws/url.go | 12 + .../github.com/aws/aws-sdk-go/aws/url_1_7.go | 29 + .../github.com/aws/aws-sdk-go/aws/version.go | 8 + .../aws/aws-sdk-go/internal/ini/ast.go | 120 + .../aws-sdk-go/internal/ini/comma_token.go | 11 + .../aws-sdk-go/internal/ini/comment_token.go | 35 + .../aws/aws-sdk-go/internal/ini/doc.go | 29 + .../aws-sdk-go/internal/ini/empty_token.go | 4 + .../aws/aws-sdk-go/internal/ini/expression.go | 24 + .../aws/aws-sdk-go/internal/ini/fuzz.go | 17 + .../aws/aws-sdk-go/internal/ini/ini.go | 51 + .../aws/aws-sdk-go/internal/ini/ini_lexer.go | 165 + .../aws/aws-sdk-go/internal/ini/ini_parser.go | 356 + .../aws-sdk-go/internal/ini/literal_tokens.go | 324 + .../aws-sdk-go/internal/ini/newline_token.go | 30 + .../aws-sdk-go/internal/ini/number_helper.go | 152 + .../aws/aws-sdk-go/internal/ini/op_tokens.go | 39 + 
.../aws-sdk-go/internal/ini/parse_error.go | 43 + .../aws-sdk-go/internal/ini/parse_stack.go | 60 + .../aws/aws-sdk-go/internal/ini/sep_tokens.go | 41 + .../aws/aws-sdk-go/internal/ini/skipper.go | 45 + .../aws/aws-sdk-go/internal/ini/statement.go | 35 + .../aws/aws-sdk-go/internal/ini/value_util.go | 284 + .../aws/aws-sdk-go/internal/ini/visitor.go | 166 + .../aws/aws-sdk-go/internal/ini/walker.go | 25 + .../aws/aws-sdk-go/internal/ini/ws_token.go | 24 + .../aws/aws-sdk-go/internal/sdkio/byte.go | 12 + .../aws/aws-sdk-go/internal/sdkio/io_go1.6.go | 10 + .../aws/aws-sdk-go/internal/sdkio/io_go1.7.go | 12 + .../aws/aws-sdk-go/internal/sdkmath/floor.go | 15 + .../internal/sdkmath/floor_go1.9.go | 56 + .../internal/sdkrand/locked_source.go | 29 + .../aws/aws-sdk-go/internal/sdkrand/read.go | 11 + .../aws-sdk-go/internal/sdkrand/read_1_5.go | 24 + .../aws/aws-sdk-go/internal/sdkuri/path.go | 23 + .../internal/shareddefaults/ecs_container.go | 12 + .../internal/shareddefaults/shared_config.go | 40 + .../aws/aws-sdk-go/private/protocol/host.go | 68 + .../private/protocol/host_prefix.go | 54 + .../private/protocol/idempotency.go | 75 + .../private/protocol/json/jsonutil/build.go | 296 + .../protocol/json/jsonutil/unmarshal.go | 250 + .../aws-sdk-go/private/protocol/jsonvalue.go | 76 + .../aws-sdk-go/private/protocol/payload.go | 81 + .../private/protocol/query/build.go | 36 + .../protocol/query/queryutil/queryutil.go | 246 + .../private/protocol/query/unmarshal.go | 39 + .../private/protocol/query/unmarshal_error.go | 69 + .../aws-sdk-go/private/protocol/rest/build.go | 310 + .../private/protocol/rest/payload.go | 45 + .../private/protocol/rest/unmarshal.go | 237 + .../private/protocol/restxml/restxml.go | 79 + .../aws-sdk-go/private/protocol/timestamp.go | 84 + .../aws-sdk-go/private/protocol/unmarshal.go | 21 + .../private/protocol/xml/xmlutil/build.go | 306 + .../private/protocol/xml/xmlutil/sort.go | 32 + .../private/protocol/xml/xmlutil/unmarshal.go | 291 + 
.../protocol/xml/xmlutil/xml_to_struct.go | 159 + .../aws/aws-sdk-go/service/route53/api.go | 15366 +++++ .../service/route53/customizations.go | 42 + .../aws/aws-sdk-go/service/route53/doc.go | 29 + .../aws/aws-sdk-go/service/route53/errors.go | 438 + .../service/route53/route53iface/interface.go | 300 + .../aws/aws-sdk-go/service/route53/service.go | 98 + .../service/route53/unmarshal_error.go | 106 + .../aws/aws-sdk-go/service/route53/waiters.go | 56 + .../aws/aws-sdk-go/service/sts/api.go | 3115 + .../aws-sdk-go/service/sts/customizations.go | 11 + .../aws/aws-sdk-go/service/sts/doc.go | 108 + .../aws/aws-sdk-go/service/sts/errors.go | 82 + .../aws/aws-sdk-go/service/sts/service.go | 98 + .../service/sts/stsiface/interface.go | 96 + vendor/github.com/beorn7/perks/LICENSE | 20 + .../beorn7/perks/quantile/exampledata.txt | 2388 + .../beorn7/perks/quantile/stream.go | 316 + .../caddyserver/caddy/.gitattributes | 21 + .../github.com/caddyserver/caddy/.gitignore | 22 + .../github.com/caddyserver/caddy/LICENSE.txt | 201 + vendor/github.com/caddyserver/caddy/README.md | 225 + vendor/github.com/caddyserver/caddy/assets.go | 48 + .../caddyserver/caddy/azure-pipelines.yml | 88 + vendor/github.com/caddyserver/caddy/caddy.go | 1036 + .../caddyserver/caddy/caddyfile/dispenser.go | 260 + .../caddyserver/caddy/caddyfile/json.go | 198 + .../caddyserver/caddy/caddyfile/lexer.go | 150 + .../caddyserver/caddy/caddyfile/parse.go | 490 + .../github.com/caddyserver/caddy/commands.go | 133 + .../caddyserver/caddy/controller.go | 145 + vendor/github.com/caddyserver/caddy/go.mod | 24 + vendor/github.com/caddyserver/caddy/go.sum | 364 + .../caddyserver/caddy/onevent/hook/config.go | 20 + .../caddyserver/caddy/onevent/hook/hook.go | 41 + .../caddyserver/caddy/onevent/on.go | 71 + .../github.com/caddyserver/caddy/plugins.go | 470 + .../caddyserver/caddy/rlimit_nonposix.go | 22 + 
.../caddyserver/caddy/rlimit_posix.go | 37 + .../github.com/caddyserver/caddy/sigtrap.go | 108 + .../caddyserver/caddy/sigtrap_nonposix.go | 19 + .../caddyserver/caddy/sigtrap_posix.go | 114 + .../caddyserver/caddy/telemetry/collection.go | 310 + .../caddyserver/caddy/telemetry/telemetry.go | 428 + .../github.com/caddyserver/caddy/upgrade.go | 232 + .../github.com/celebdor/zeroconf/.gitignore | 24 + vendor/github.com/celebdor/zeroconf/LICENSE | 27 + vendor/github.com/celebdor/zeroconf/README.md | 108 + vendor/github.com/celebdor/zeroconf/client.go | 458 + .../celebdor/zeroconf/connection.go | 115 + vendor/github.com/celebdor/zeroconf/doc.go | 14 + vendor/github.com/celebdor/zeroconf/server.go | 761 + .../github.com/celebdor/zeroconf/service.go | 109 + vendor/github.com/celebdor/zeroconf/utils.go | 8 + vendor/github.com/cenkalti/backoff/.gitignore | 22 + .../github.com/cenkalti/backoff/.travis.yml | 10 + vendor/github.com/cenkalti/backoff/LICENSE | 20 + vendor/github.com/cenkalti/backoff/README.md | 30 + vendor/github.com/cenkalti/backoff/backoff.go | 66 + vendor/github.com/cenkalti/backoff/context.go | 63 + .../cenkalti/backoff/exponential.go | 153 + vendor/github.com/cenkalti/backoff/retry.go | 82 + vendor/github.com/cenkalti/backoff/ticker.go | 82 + vendor/github.com/cenkalti/backoff/tries.go | 35 + .../github.com/cespare/xxhash/v2/.travis.yml | 8 + .../github.com/cespare/xxhash/v2/LICENSE.txt | 22 + vendor/github.com/cespare/xxhash/v2/README.md | 55 + vendor/github.com/cespare/xxhash/v2/go.mod | 3 + vendor/github.com/cespare/xxhash/v2/go.sum | 0 vendor/github.com/cespare/xxhash/v2/xxhash.go | 236 + .../cespare/xxhash/v2/xxhash_amd64.go | 13 + .../cespare/xxhash/v2/xxhash_amd64.s | 215 + .../cespare/xxhash/v2/xxhash_other.go | 76 + 
.../cespare/xxhash/v2/xxhash_safe.go | 15 + .../cespare/xxhash/v2/xxhash_unsafe.go | 46 + vendor/github.com/coredns/federation/OWNERS | 6 + .../github.com/coredns/federation/README.md | 47 + .../coredns/federation/federation.go | 149 + .../federation/kubernetes_federations.go | 51 + vendor/github.com/coredns/federation/parse.go | 129 + vendor/github.com/coredns/federation/setup.go | 95 + vendor/github.com/coreos/go-systemd/LICENSE | 191 + vendor/github.com/coreos/go-systemd/NOTICE | 5 + .../coreos/go-systemd/journal/journal.go | 189 + vendor/github.com/coreos/pkg/LICENSE | 202 + vendor/github.com/coreos/pkg/NOTICE | 5 + .../github.com/coreos/pkg/capnslog/README.md | 39 + .../coreos/pkg/capnslog/formatters.go | 157 + .../coreos/pkg/capnslog/glog_formatter.go | 96 + vendor/github.com/coreos/pkg/capnslog/init.go | 49 + .../coreos/pkg/capnslog/init_windows.go | 25 + .../coreos/pkg/capnslog/journald_formatter.go | 68 + .../coreos/pkg/capnslog/log_hijack.go | 39 + .../github.com/coreos/pkg/capnslog/logmap.go | 245 + .../coreos/pkg/capnslog/pkg_logger.go | 191 + .../coreos/pkg/capnslog/syslog_formatter.go | 65 + vendor/github.com/davecgh/go-spew/LICENSE | 15 + .../github.com/davecgh/go-spew/spew/bypass.go | 145 + .../davecgh/go-spew/spew/bypasssafe.go | 38 + .../github.com/davecgh/go-spew/spew/common.go | 341 + .../github.com/davecgh/go-spew/spew/config.go | 306 + vendor/github.com/davecgh/go-spew/spew/doc.go | 211 + .../github.com/davecgh/go-spew/spew/dump.go | 509 + .../github.com/davecgh/go-spew/spew/format.go | 419 + .../github.com/davecgh/go-spew/spew/spew.go | 148 + vendor/github.com/dgrijalva/jwt-go/.gitignore | 4 + .../github.com/dgrijalva/jwt-go/.travis.yml | 13 + vendor/github.com/dgrijalva/jwt-go/LICENSE | 8 + .../dgrijalva/jwt-go/MIGRATION_GUIDE.md | 97 + 
vendor/github.com/dgrijalva/jwt-go/README.md | 100 + .../dgrijalva/jwt-go/VERSION_HISTORY.md | 118 + vendor/github.com/dgrijalva/jwt-go/claims.go | 134 + vendor/github.com/dgrijalva/jwt-go/doc.go | 4 + vendor/github.com/dgrijalva/jwt-go/ecdsa.go | 148 + .../dgrijalva/jwt-go/ecdsa_utils.go | 67 + vendor/github.com/dgrijalva/jwt-go/errors.go | 59 + vendor/github.com/dgrijalva/jwt-go/hmac.go | 95 + .../github.com/dgrijalva/jwt-go/map_claims.go | 94 + vendor/github.com/dgrijalva/jwt-go/none.go | 52 + vendor/github.com/dgrijalva/jwt-go/parser.go | 148 + vendor/github.com/dgrijalva/jwt-go/rsa.go | 101 + vendor/github.com/dgrijalva/jwt-go/rsa_pss.go | 126 + .../github.com/dgrijalva/jwt-go/rsa_utils.go | 101 + .../dgrijalva/jwt-go/signing_method.go | 35 + vendor/github.com/dgrijalva/jwt-go/token.go | 108 + .../github.com/dimchansky/utfbom/.gitignore | 37 + .../github.com/dimchansky/utfbom/.travis.yml | 18 + vendor/github.com/dimchansky/utfbom/LICENSE | 201 + vendor/github.com/dimchansky/utfbom/README.md | 66 + vendor/github.com/dimchansky/utfbom/go.mod | 1 + vendor/github.com/dimchansky/utfbom/utfbom.go | 192 + .../dnstap/golang-dnstap/.gitignore | 1 + .../github.com/dnstap/golang-dnstap/COPYRIGHT | 14 + .../dnstap/golang-dnstap/FrameStreamInput.go | 73 + .../dnstap/golang-dnstap/FrameStreamOutput.go | 74 + .../golang-dnstap/FrameStreamSockInput.go | 64 + .../github.com/dnstap/golang-dnstap/LICENSE | 202 + .../dnstap/golang-dnstap/QuietTextFormat.go | 172 + vendor/github.com/dnstap/golang-dnstap/README | 15 + .../dnstap/golang-dnstap/TextOutput.go | 86 + .../dnstap/golang-dnstap/YamlFormat.go | 116 + .../github.com/dnstap/golang-dnstap/dnstap.go | 32 + .../dnstap/golang-dnstap/dnstap.pb.go | 448 + .../github.com/eapache/go-resiliency/LICENSE | 22 + 
.../eapache/go-resiliency/breaker/README.md | 34 + .../eapache/go-resiliency/breaker/breaker.go | 161 + .../eapache/go-xerial-snappy/.gitignore | 24 + .../eapache/go-xerial-snappy/.travis.yml | 7 + .../eapache/go-xerial-snappy/LICENSE | 21 + .../eapache/go-xerial-snappy/README.md | 13 + .../eapache/go-xerial-snappy/fuzz.go | 16 + .../eapache/go-xerial-snappy/snappy.go | 131 + vendor/github.com/eapache/queue/.gitignore | 23 + vendor/github.com/eapache/queue/.travis.yml | 7 + vendor/github.com/eapache/queue/LICENSE | 21 + vendor/github.com/eapache/queue/README.md | 16 + vendor/github.com/eapache/queue/queue.go | 102 + .../github.com/evanphx/json-patch/.travis.yml | 16 + vendor/github.com/evanphx/json-patch/LICENSE | 25 + .../github.com/evanphx/json-patch/README.md | 292 + vendor/github.com/evanphx/json-patch/merge.go | 383 + vendor/github.com/evanphx/json-patch/patch.go | 682 + .../farsightsec/golang-framestream/.gitignore | 1 + .../farsightsec/golang-framestream/COPYRIGHT | 13 + .../farsightsec/golang-framestream/Control.go | 156 + .../farsightsec/golang-framestream/Decoder.go | 154 + .../farsightsec/golang-framestream/Encoder.go | 99 + .../farsightsec/golang-framestream/LICENSE | 202 + .../farsightsec/golang-framestream/README.md | 13 + .../golang-framestream/framestream.go | 32 + vendor/github.com/flynn/go-shlex/COPYING | 202 + vendor/github.com/flynn/go-shlex/Makefile | 21 + vendor/github.com/flynn/go-shlex/README.md | 2 + vendor/github.com/flynn/go-shlex/shlex.go | 457 + vendor/github.com/go-logfmt/logfmt/.gitignore | 4 + .../github.com/go-logfmt/logfmt/.travis.yml | 16 + .../github.com/go-logfmt/logfmt/CHANGELOG.md | 41 + vendor/github.com/go-logfmt/logfmt/LICENSE | 22 + vendor/github.com/go-logfmt/logfmt/README.md | 33 + vendor/github.com/go-logfmt/logfmt/decode.go | 237 + 
vendor/github.com/go-logfmt/logfmt/doc.go | 6 + vendor/github.com/go-logfmt/logfmt/encode.go | 322 + vendor/github.com/go-logfmt/logfmt/fuzz.go | 126 + vendor/github.com/go-logfmt/logfmt/go.mod | 3 + vendor/github.com/go-logfmt/logfmt/go.sum | 2 + .../github.com/go-logfmt/logfmt/jsonstring.go | 277 + vendor/github.com/gogo/protobuf/AUTHORS | 15 + vendor/github.com/gogo/protobuf/CONTRIBUTORS | 23 + vendor/github.com/gogo/protobuf/LICENSE | 35 + .../gogo/protobuf/gogoproto/Makefile | 37 + .../github.com/gogo/protobuf/gogoproto/doc.go | 169 + .../gogo/protobuf/gogoproto/gogo.pb.go | 874 + .../gogo/protobuf/gogoproto/gogo.pb.golden | 45 + .../gogo/protobuf/gogoproto/gogo.proto | 144 + .../gogo/protobuf/gogoproto/helper.go | 415 + .../github.com/gogo/protobuf/proto/Makefile | 43 + .../github.com/gogo/protobuf/proto/clone.go | 258 + .../gogo/protobuf/proto/custom_gogo.go | 39 + .../github.com/gogo/protobuf/proto/decode.go | 427 + .../gogo/protobuf/proto/deprecated.go | 63 + .../github.com/gogo/protobuf/proto/discard.go | 350 + .../gogo/protobuf/proto/duration.go | 100 + .../gogo/protobuf/proto/duration_gogo.go | 49 + .../github.com/gogo/protobuf/proto/encode.go | 203 + .../gogo/protobuf/proto/encode_gogo.go | 33 + .../github.com/gogo/protobuf/proto/equal.go | 300 + .../gogo/protobuf/proto/extensions.go | 604 + .../gogo/protobuf/proto/extensions_gogo.go | 368 + vendor/github.com/gogo/protobuf/proto/lib.go | 967 + .../gogo/protobuf/proto/lib_gogo.go | 50 + .../gogo/protobuf/proto/message_set.go | 181 + .../gogo/protobuf/proto/pointer_reflect.go | 357 + .../protobuf/proto/pointer_reflect_gogo.go | 59 + .../gogo/protobuf/proto/pointer_unsafe.go | 308 + .../protobuf/proto/pointer_unsafe_gogo.go | 56 + .../gogo/protobuf/proto/properties.go | 599 + .../gogo/protobuf/proto/properties_gogo.go | 36 + .../gogo/protobuf/proto/skip_gogo.go | 119 + 
.../gogo/protobuf/proto/table_marshal.go | 3006 + .../gogo/protobuf/proto/table_marshal_gogo.go | 388 + .../gogo/protobuf/proto/table_merge.go | 657 + .../gogo/protobuf/proto/table_unmarshal.go | 2245 + .../protobuf/proto/table_unmarshal_gogo.go | 385 + vendor/github.com/gogo/protobuf/proto/text.go | 928 + .../gogo/protobuf/proto/text_gogo.go | 57 + .../gogo/protobuf/proto/text_parser.go | 1018 + .../gogo/protobuf/proto/timestamp.go | 113 + .../gogo/protobuf/proto/timestamp_gogo.go | 49 + .../gogo/protobuf/proto/wrappers.go | 1888 + .../gogo/protobuf/proto/wrappers_gogo.go | 113 + .../protoc-gen-gogo/descriptor/Makefile | 36 + .../protoc-gen-gogo/descriptor/descriptor.go | 118 + .../descriptor/descriptor.pb.go | 2865 + .../descriptor/descriptor_gostring.gen.go | 752 + .../protoc-gen-gogo/descriptor/helper.go | 390 + .../gogo/protobuf/sortkeys/sortkeys.go | 101 + vendor/github.com/golang/protobuf/AUTHORS | 3 + .../github.com/golang/protobuf/CONTRIBUTORS | 3 + vendor/github.com/golang/protobuf/LICENSE | 28 + .../github.com/golang/protobuf/proto/clone.go | 253 + .../golang/protobuf/proto/decode.go | 427 + .../golang/protobuf/proto/deprecated.go | 63 + .../golang/protobuf/proto/discard.go | 350 + .../golang/protobuf/proto/encode.go | 203 + .../github.com/golang/protobuf/proto/equal.go | 301 + .../golang/protobuf/proto/extensions.go | 607 + .../github.com/golang/protobuf/proto/lib.go | 965 + .../golang/protobuf/proto/message_set.go | 181 + .../golang/protobuf/proto/pointer_reflect.go | 360 + .../golang/protobuf/proto/pointer_unsafe.go | 313 + .../golang/protobuf/proto/properties.go | 544 + .../golang/protobuf/proto/table_marshal.go | 2776 + .../golang/protobuf/proto/table_merge.go | 654 + .../golang/protobuf/proto/table_unmarshal.go | 2053 + .../github.com/golang/protobuf/proto/text.go | 843 + .../golang/protobuf/proto/text_parser.go | 880 + .../github.com/golang/protobuf/ptypes/any.go | 141 + 
.../golang/protobuf/ptypes/any/any.pb.go | 200 + .../golang/protobuf/ptypes/any/any.proto | 154 + .../github.com/golang/protobuf/ptypes/doc.go | 35 + .../golang/protobuf/ptypes/duration.go | 102 + .../protobuf/ptypes/duration/duration.pb.go | 161 + .../protobuf/ptypes/duration/duration.proto | 117 + .../golang/protobuf/ptypes/timestamp.go | 132 + .../protobuf/ptypes/timestamp/timestamp.pb.go | 179 + .../protobuf/ptypes/timestamp/timestamp.proto | 135 + vendor/github.com/golang/snappy/.gitignore | 16 + vendor/github.com/golang/snappy/AUTHORS | 15 + vendor/github.com/golang/snappy/CONTRIBUTORS | 37 + vendor/github.com/golang/snappy/LICENSE | 27 + vendor/github.com/golang/snappy/README | 107 + vendor/github.com/golang/snappy/decode.go | 237 + .../github.com/golang/snappy/decode_amd64.go | 14 + .../github.com/golang/snappy/decode_amd64.s | 490 + .../github.com/golang/snappy/decode_other.go | 101 + vendor/github.com/golang/snappy/encode.go | 285 + .../github.com/golang/snappy/encode_amd64.go | 29 + .../github.com/golang/snappy/encode_amd64.s | 730 + .../github.com/golang/snappy/encode_other.go | 238 + vendor/github.com/golang/snappy/snappy.go | 98 + vendor/github.com/google/go-cmp/LICENSE | 27 + .../github.com/google/go-cmp/cmp/compare.go | 616 + .../google/go-cmp/cmp/export_panic.go | 15 + .../google/go-cmp/cmp/export_unsafe.go | 23 + .../go-cmp/cmp/internal/diff/debug_disable.go | 17 + .../go-cmp/cmp/internal/diff/debug_enable.go | 122 + .../google/go-cmp/cmp/internal/diff/diff.go | 372 + .../google/go-cmp/cmp/internal/flags/flags.go | 9 + .../cmp/internal/flags/toolchain_legacy.go | 10 + .../cmp/internal/flags/toolchain_recent.go | 10 + .../go-cmp/cmp/internal/function/func.go | 99 + .../cmp/internal/value/pointer_purego.go | 23 + .../cmp/internal/value/pointer_unsafe.go | 26 + .../google/go-cmp/cmp/internal/value/sort.go | 104 + 
.../google/go-cmp/cmp/internal/value/zero.go | 45 + .../github.com/google/go-cmp/cmp/options.go | 524 + vendor/github.com/google/go-cmp/cmp/path.go | 308 + vendor/github.com/google/go-cmp/cmp/report.go | 51 + .../google/go-cmp/cmp/report_compare.go | 296 + .../google/go-cmp/cmp/report_reflect.go | 279 + .../google/go-cmp/cmp/report_slices.go | 333 + .../google/go-cmp/cmp/report_text.go | 382 + .../google/go-cmp/cmp/report_value.go | 121 + vendor/github.com/google/gofuzz/.travis.yml | 13 + .../github.com/google/gofuzz/CONTRIBUTING.md | 67 + vendor/github.com/google/gofuzz/LICENSE | 202 + vendor/github.com/google/gofuzz/README.md | 71 + vendor/github.com/google/gofuzz/doc.go | 18 + vendor/github.com/google/gofuzz/fuzz.go | 487 + vendor/github.com/google/gofuzz/go.mod | 3 + vendor/github.com/google/uuid/.travis.yml | 9 + vendor/github.com/google/uuid/CONTRIBUTING.md | 10 + vendor/github.com/google/uuid/CONTRIBUTORS | 9 + vendor/github.com/google/uuid/LICENSE | 27 + vendor/github.com/google/uuid/README.md | 19 + vendor/github.com/google/uuid/dce.go | 80 + vendor/github.com/google/uuid/doc.go | 12 + vendor/github.com/google/uuid/go.mod | 1 + vendor/github.com/google/uuid/hash.go | 53 + vendor/github.com/google/uuid/marshal.go | 37 + vendor/github.com/google/uuid/node.go | 90 + vendor/github.com/google/uuid/node_js.go | 12 + vendor/github.com/google/uuid/node_net.go | 33 + vendor/github.com/google/uuid/sql.go | 59 + vendor/github.com/google/uuid/time.go | 123 + vendor/github.com/google/uuid/util.go | 43 + vendor/github.com/google/uuid/uuid.go | 245 + vendor/github.com/google/uuid/version1.go | 44 + vendor/github.com/google/uuid/version4.go | 38 + .../github.com/googleapis/gax-go/v2/LICENSE | 27 + .../googleapis/gax-go/v2/call_option.go 
| 161 + vendor/github.com/googleapis/gax-go/v2/gax.go | 39 + vendor/github.com/googleapis/gax-go/v2/go.mod | 3 + vendor/github.com/googleapis/gax-go/v2/go.sum | 25 + .../github.com/googleapis/gax-go/v2/header.go | 53 + .../github.com/googleapis/gax-go/v2/invoke.go | 99 + vendor/github.com/googleapis/gnostic/LICENSE | 203 + .../googleapis/gnostic/OpenAPIv2/OpenAPIv2.go | 8728 +++ .../gnostic/OpenAPIv2/OpenAPIv2.pb.go | 4455 ++ .../gnostic/OpenAPIv2/OpenAPIv2.proto | 663 + .../googleapis/gnostic/OpenAPIv2/README.md | 16 + .../gnostic/OpenAPIv2/openapi-2.0.json | 1610 + .../googleapis/gnostic/compiler/README.md | 3 + .../googleapis/gnostic/compiler/context.go | 43 + .../googleapis/gnostic/compiler/error.go | 61 + .../gnostic/compiler/extension-handler.go | 101 + .../googleapis/gnostic/compiler/helpers.go | 197 + .../googleapis/gnostic/compiler/main.go | 16 + .../googleapis/gnostic/compiler/reader.go | 175 + .../gnostic/extensions/COMPILE-EXTENSION.sh | 5 + .../googleapis/gnostic/extensions/README.md | 5 + .../gnostic/extensions/extension.pb.go | 218 + .../gnostic/extensions/extension.proto | 93 + .../gnostic/extensions/extensions.go | 82 + .../gophercloud/gophercloud/.gitignore | 3 + .../gophercloud/gophercloud/.travis.yml | 25 + .../gophercloud/gophercloud/.zuul.yaml | 114 + .../gophercloud/gophercloud/CHANGELOG.md | 69 + .../gophercloud/gophercloud/LICENSE | 191 + .../gophercloud/gophercloud/README.md | 159 + .../gophercloud/gophercloud/auth_options.go | 437 + .../gophercloud/gophercloud/auth_result.go | 52 + .../github.com/gophercloud/gophercloud/doc.go | 110 + .../gophercloud/endpoint_search.go | 76 + .../gophercloud/gophercloud/errors.go | 471 + .../github.com/gophercloud/gophercloud/go.mod | 7 + .../github.com/gophercloud/gophercloud/go.sum | 8 + .../gophercloud/openstack/auth_env.go | 125 + .../gophercloud/openstack/client.go | 438 + .../gophercloud/gophercloud/openstack/doc.go | 14 + 
.../openstack/endpoint_location.go | 107 + .../gophercloud/openstack/errors.go | 71 + .../openstack/identity/v2/tenants/doc.go | 65 + .../openstack/identity/v2/tenants/requests.go | 116 + .../openstack/identity/v2/tenants/results.go | 91 + .../openstack/identity/v2/tenants/urls.go | 23 + .../openstack/identity/v2/tokens/doc.go | 46 + .../openstack/identity/v2/tokens/requests.go | 103 + .../openstack/identity/v2/tokens/results.go | 174 + .../openstack/identity/v2/tokens/urls.go | 13 + .../openstack/identity/v3/tokens/doc.go | 108 + .../openstack/identity/v3/tokens/requests.go | 162 + .../openstack/identity/v3/tokens/results.go | 178 + .../openstack/identity/v3/tokens/urls.go | 7 + .../openstack/utils/base_endpoint.go | 28 + .../openstack/utils/choose_version.go | 111 + .../gophercloud/pagination/http.go | 60 + .../gophercloud/pagination/linked.go | 92 + .../gophercloud/pagination/marker.go | 58 + .../gophercloud/pagination/pager.go | 251 + .../gophercloud/gophercloud/pagination/pkg.go | 4 + .../gophercloud/pagination/single.go | 33 + .../gophercloud/gophercloud/params.go | 491 + .../gophercloud/provider_client.go | 501 + .../gophercloud/gophercloud/results.go | 448 + .../gophercloud/gophercloud/service_client.go | 154 + .../gophercloud/gophercloud/util.go | 102 + .../grpc-ecosystem/grpc-opentracing/LICENSE | 27 + .../grpc-ecosystem/grpc-opentracing/PATENTS | 23 + .../grpc-opentracing/go/otgrpc/README.md | 57 + .../grpc-opentracing/go/otgrpc/client.go | 239 + .../grpc-opentracing/go/otgrpc/errors.go | 69 + .../grpc-opentracing/go/otgrpc/options.go | 76 + .../grpc-opentracing/go/otgrpc/package.go | 5 + .../grpc-opentracing/go/otgrpc/server.go | 141 + .../grpc-opentracing/go/otgrpc/shared.go | 42 + .../hashicorp/golang-lru/.gitignore | 23 + vendor/github.com/hashicorp/golang-lru/2q.go | 223 + .../github.com/hashicorp/golang-lru/LICENSE | 362 + .../github.com/hashicorp/golang-lru/README.md | 25 + 
vendor/github.com/hashicorp/golang-lru/arc.go | 257 + vendor/github.com/hashicorp/golang-lru/doc.go | 21 + vendor/github.com/hashicorp/golang-lru/go.mod | 3 + vendor/github.com/hashicorp/golang-lru/lru.go | 134 + .../hashicorp/golang-lru/simplelru/lru.go | 177 + .../golang-lru/simplelru/lru_interface.go | 39 + vendor/github.com/imdario/mergo/.gitignore | 33 + vendor/github.com/imdario/mergo/.travis.yml | 7 + .../imdario/mergo/CODE_OF_CONDUCT.md | 46 + vendor/github.com/imdario/mergo/LICENSE | 28 + vendor/github.com/imdario/mergo/README.md | 238 + vendor/github.com/imdario/mergo/doc.go | 44 + vendor/github.com/imdario/mergo/map.go | 175 + vendor/github.com/imdario/mergo/merge.go | 255 + vendor/github.com/imdario/mergo/mergo.go | 97 + .../github.com/infobloxopen/go-trees/LICENSE | 201 + .../infobloxopen/go-trees/iptree/iptree.go | 341 + .../infobloxopen/go-trees/numtree/node32.go | 438 + .../infobloxopen/go-trees/numtree/node64.go | 380 + .../jmespath/go-jmespath/.gitignore | 4 + .../jmespath/go-jmespath/.travis.yml | 9 + .../github.com/jmespath/go-jmespath/LICENSE | 13 + .../github.com/jmespath/go-jmespath/Makefile | 44 + .../github.com/jmespath/go-jmespath/README.md | 7 + vendor/github.com/jmespath/go-jmespath/api.go | 49 + .../go-jmespath/astnodetype_string.go | 16 + .../jmespath/go-jmespath/functions.go | 842 + .../jmespath/go-jmespath/interpreter.go | 418 + .../github.com/jmespath/go-jmespath/lexer.go | 420 + .../github.com/jmespath/go-jmespath/parser.go | 603 + .../jmespath/go-jmespath/toktype_string.go | 16 + .../github.com/jmespath/go-jmespath/util.go | 185 + .../github.com/json-iterator/go/.codecov.yml | 3 + vendor/github.com/json-iterator/go/.gitignore | 4 + .../github.com/json-iterator/go/.travis.yml | 14 + vendor/github.com/json-iterator/go/Gopkg.lock | 21 + 
vendor/github.com/json-iterator/go/Gopkg.toml | 26 + vendor/github.com/json-iterator/go/LICENSE | 21 + vendor/github.com/json-iterator/go/README.md | 87 + vendor/github.com/json-iterator/go/adapter.go | 150 + vendor/github.com/json-iterator/go/any.go | 325 + .../github.com/json-iterator/go/any_array.go | 278 + .../github.com/json-iterator/go/any_bool.go | 137 + .../github.com/json-iterator/go/any_float.go | 83 + .../github.com/json-iterator/go/any_int32.go | 74 + .../github.com/json-iterator/go/any_int64.go | 74 + .../json-iterator/go/any_invalid.go | 82 + vendor/github.com/json-iterator/go/any_nil.go | 69 + .../github.com/json-iterator/go/any_number.go | 123 + .../github.com/json-iterator/go/any_object.go | 374 + vendor/github.com/json-iterator/go/any_str.go | 166 + .../github.com/json-iterator/go/any_uint32.go | 74 + .../github.com/json-iterator/go/any_uint64.go | 74 + vendor/github.com/json-iterator/go/build.sh | 12 + vendor/github.com/json-iterator/go/config.go | 375 + .../go/fuzzy_mode_convert_table.md | 7 + vendor/github.com/json-iterator/go/go.mod | 11 + vendor/github.com/json-iterator/go/go.sum | 14 + vendor/github.com/json-iterator/go/iter.go | 322 + .../github.com/json-iterator/go/iter_array.go | 58 + .../github.com/json-iterator/go/iter_float.go | 339 + .../github.com/json-iterator/go/iter_int.go | 345 + .../json-iterator/go/iter_object.go | 251 + .../github.com/json-iterator/go/iter_skip.go | 130 + .../json-iterator/go/iter_skip_sloppy.go | 144 + .../json-iterator/go/iter_skip_strict.go | 99 + .../github.com/json-iterator/go/iter_str.go | 215 + .../github.com/json-iterator/go/jsoniter.go | 18 + vendor/github.com/json-iterator/go/pool.go | 42 + vendor/github.com/json-iterator/go/reflect.go | 332 + 
.../json-iterator/go/reflect_array.go | 104 + .../json-iterator/go/reflect_dynamic.go | 70 + .../json-iterator/go/reflect_extension.go | 483 + .../json-iterator/go/reflect_json_number.go | 112 + .../go/reflect_json_raw_message.go | 60 + .../json-iterator/go/reflect_map.go | 338 + .../json-iterator/go/reflect_marshaler.go | 217 + .../json-iterator/go/reflect_native.go | 453 + .../json-iterator/go/reflect_optional.go | 133 + .../json-iterator/go/reflect_slice.go | 99 + .../go/reflect_struct_decoder.go | 1048 + .../go/reflect_struct_encoder.go | 210 + vendor/github.com/json-iterator/go/stream.go | 211 + .../json-iterator/go/stream_float.go | 111 + .../github.com/json-iterator/go/stream_int.go | 190 + .../github.com/json-iterator/go/stream_str.go | 372 + vendor/github.com/json-iterator/go/test.sh | 12 + vendor/github.com/kr/logfmt/.gitignore | 3 + vendor/github.com/kr/logfmt/Readme | 12 + vendor/github.com/kr/logfmt/decode.go | 184 + vendor/github.com/kr/logfmt/scanner.go | 149 + vendor/github.com/kr/logfmt/unquote.go | 149 + .../golang_protobuf_extensions/LICENSE | 201 + .../golang_protobuf_extensions/NOTICE | 1 + .../pbutil/.gitignore | 1 + .../pbutil/Makefile | 7 + .../pbutil/decode.go | 75 + .../golang_protobuf_extensions/pbutil/doc.go | 16 + .../pbutil/encode.go | 46 + vendor/github.com/miekg/dns/.codecov.yml | 8 + vendor/github.com/miekg/dns/.gitignore | 4 + vendor/github.com/miekg/dns/.travis.yml | 16 + vendor/github.com/miekg/dns/AUTHORS | 1 + vendor/github.com/miekg/dns/CODEOWNERS | 1 + vendor/github.com/miekg/dns/CONTRIBUTORS | 10 + vendor/github.com/miekg/dns/COPYRIGHT | 9 + vendor/github.com/miekg/dns/LICENSE | 32 + vendor/github.com/miekg/dns/Makefile.fuzz | 33 + vendor/github.com/miekg/dns/Makefile.release | 52 + vendor/github.com/miekg/dns/README.md | 174 + 
vendor/github.com/miekg/dns/acceptfunc.go | 54 + vendor/github.com/miekg/dns/client.go | 415 + vendor/github.com/miekg/dns/clientconfig.go | 135 + vendor/github.com/miekg/dns/dane.go | 43 + vendor/github.com/miekg/dns/defaults.go | 378 + vendor/github.com/miekg/dns/dns.go | 134 + vendor/github.com/miekg/dns/dnssec.go | 794 + vendor/github.com/miekg/dns/dnssec_keygen.go | 140 + vendor/github.com/miekg/dns/dnssec_keyscan.go | 322 + vendor/github.com/miekg/dns/dnssec_privkey.go | 94 + vendor/github.com/miekg/dns/doc.go | 269 + vendor/github.com/miekg/dns/duplicate.go | 38 + vendor/github.com/miekg/dns/edns.go | 671 + vendor/github.com/miekg/dns/format.go | 93 + vendor/github.com/miekg/dns/fuzz.go | 32 + vendor/github.com/miekg/dns/generate.go | 247 + vendor/github.com/miekg/dns/go.mod | 12 + vendor/github.com/miekg/dns/go.sum | 33 + vendor/github.com/miekg/dns/labels.go | 212 + vendor/github.com/miekg/dns/listen_go111.go | 44 + .../github.com/miekg/dns/listen_go_not111.go | 23 + vendor/github.com/miekg/dns/msg.go | 1196 + vendor/github.com/miekg/dns/msg_helpers.go | 683 + vendor/github.com/miekg/dns/msg_truncate.go | 111 + vendor/github.com/miekg/dns/nsecx.go | 95 + vendor/github.com/miekg/dns/privaterr.go | 114 + vendor/github.com/miekg/dns/reverse.go | 52 + vendor/github.com/miekg/dns/sanitize.go | 86 + vendor/github.com/miekg/dns/scan.go | 1408 + vendor/github.com/miekg/dns/scan_rr.go | 1698 + vendor/github.com/miekg/dns/serve_mux.go | 123 + vendor/github.com/miekg/dns/server.go | 764 + vendor/github.com/miekg/dns/sig0.go | 209 + vendor/github.com/miekg/dns/singleinflight.go | 61 + vendor/github.com/miekg/dns/smimea.go | 44 + vendor/github.com/miekg/dns/tlsa.go | 44 + 
vendor/github.com/miekg/dns/tsig.go | 389 + vendor/github.com/miekg/dns/types.go | 1432 + vendor/github.com/miekg/dns/udp.go | 102 + vendor/github.com/miekg/dns/udp_windows.go | 35 + vendor/github.com/miekg/dns/update.go | 110 + vendor/github.com/miekg/dns/version.go | 15 + vendor/github.com/miekg/dns/xfr.go | 263 + vendor/github.com/miekg/dns/zduplicate.go | 1140 + vendor/github.com/miekg/dns/zmsg.go | 2722 + vendor/github.com/miekg/dns/ztypes.go | 881 + .../github.com/mitchellh/go-homedir/LICENSE | 21 + .../github.com/mitchellh/go-homedir/README.md | 14 + vendor/github.com/mitchellh/go-homedir/go.mod | 1 + .../mitchellh/go-homedir/homedir.go | 167 + .../modern-go/concurrent/.gitignore | 1 + .../modern-go/concurrent/.travis.yml | 14 + .../github.com/modern-go/concurrent/LICENSE | 201 + .../github.com/modern-go/concurrent/README.md | 49 + .../modern-go/concurrent/executor.go | 14 + .../modern-go/concurrent/go_above_19.go | 15 + .../modern-go/concurrent/go_below_19.go | 33 + vendor/github.com/modern-go/concurrent/log.go | 13 + .../github.com/modern-go/concurrent/test.sh | 12 + .../concurrent/unbounded_executor.go | 119 + .../github.com/modern-go/reflect2/.gitignore | 2 + .../github.com/modern-go/reflect2/.travis.yml | 15 + .../github.com/modern-go/reflect2/Gopkg.lock | 15 + .../github.com/modern-go/reflect2/Gopkg.toml | 35 + vendor/github.com/modern-go/reflect2/LICENSE | 201 + .../github.com/modern-go/reflect2/README.md | 71 + .../modern-go/reflect2/go_above_17.go | 8 + .../modern-go/reflect2/go_above_19.go | 14 + .../modern-go/reflect2/go_below_17.go | 9 + .../modern-go/reflect2/go_below_19.go | 14 + .../github.com/modern-go/reflect2/reflect2.go | 298 + .../modern-go/reflect2/reflect2_amd64.s | 0 .../modern-go/reflect2/reflect2_kind.go | 30 + 
.../modern-go/reflect2/relfect2_386.s | 0 .../modern-go/reflect2/relfect2_amd64p32.s | 0 .../modern-go/reflect2/relfect2_arm.s | 0 .../modern-go/reflect2/relfect2_arm64.s | 0 .../modern-go/reflect2/relfect2_mips64x.s | 0 .../modern-go/reflect2/relfect2_mipsx.s | 0 .../modern-go/reflect2/relfect2_ppc64x.s | 0 .../modern-go/reflect2/relfect2_s390x.s | 0 .../modern-go/reflect2/safe_field.go | 58 + .../github.com/modern-go/reflect2/safe_map.go | 101 + .../modern-go/reflect2/safe_slice.go | 92 + .../modern-go/reflect2/safe_struct.go | 29 + .../modern-go/reflect2/safe_type.go | 78 + vendor/github.com/modern-go/reflect2/test.sh | 12 + .../github.com/modern-go/reflect2/type_map.go | 113 + .../modern-go/reflect2/unsafe_array.go | 65 + .../modern-go/reflect2/unsafe_eface.go | 59 + .../modern-go/reflect2/unsafe_field.go | 74 + .../modern-go/reflect2/unsafe_iface.go | 64 + .../modern-go/reflect2/unsafe_link.go | 70 + .../modern-go/reflect2/unsafe_map.go | 138 + .../modern-go/reflect2/unsafe_ptr.go | 46 + .../modern-go/reflect2/unsafe_slice.go | 177 + .../modern-go/reflect2/unsafe_struct.go | 59 + .../modern-go/reflect2/unsafe_type.go | 85 + .../openshift/coredns-mdns/.gitignore | 2 + .../openshift/coredns-mdns/CONTRIBUTING.md | 11 + vendor/github.com/openshift/coredns-mdns/DCO | 37 + .../openshift/coredns-mdns/Dockerfile | 16 + .../openshift/coredns-mdns/Jenkinsfile | 14 + .../github.com/openshift/coredns-mdns/LICENSE | 202 + .../openshift/coredns-mdns/Makefile | 19 + .../github.com/openshift/coredns-mdns/OWNERS | 10 + .../openshift/coredns-mdns/README.md | 71 + .../openshift/coredns-mdns/etcd.service | 10 + .../github.com/openshift/coredns-mdns/go.mod | 11 + .../github.com/openshift/coredns-mdns/go.sum | 326 + .../github.com/openshift/coredns-mdns/mdns.go | 247 + .../openshift/coredns-mdns/setup.go | 75 + .../go-observer/.gitignore | 14 + .../opentracing-contrib/go-observer/LICENSE | 201 + 
.../opentracing-contrib/go-observer/README.md | 64 + .../go-observer/observer.go | 39 + .../opentracing/opentracing-go/.gitignore | 1 + .../opentracing/opentracing-go/.travis.yml | 20 + .../opentracing/opentracing-go/CHANGELOG.md | 46 + .../opentracing/opentracing-go/LICENSE | 201 + .../opentracing/opentracing-go/Makefile | 20 + .../opentracing/opentracing-go/README.md | 171 + .../opentracing/opentracing-go/ext/tags.go | 210 + .../opentracing-go/globaltracer.go | 42 + .../opentracing/opentracing-go/gocontext.go | 60 + .../opentracing/opentracing-go/log/field.go | 269 + .../opentracing/opentracing-go/log/util.go | 54 + .../mocktracer/mocklogrecord.go | 105 + .../opentracing-go/mocktracer/mockspan.go | 284 + .../opentracing-go/mocktracer/mocktracer.go | 105 + .../opentracing-go/mocktracer/propagation.go | 120 + .../opentracing/opentracing-go/noop.go | 64 + .../opentracing/opentracing-go/propagation.go | 176 + .../opentracing/opentracing-go/span.go | 189 + .../opentracing/opentracing-go/tracer.go | 304 + .../zipkin-go-opentracing/.gitignore | 4 + .../zipkin-go-opentracing/.travis.yml | 14 + .../zipkin-go-opentracing/LICENSE | 22 + .../zipkin-go-opentracing/Makefile | 26 + .../zipkin-go-opentracing/README.md | 28 + .../zipkin-go-opentracing/collector-http.go | 211 + .../zipkin-go-opentracing/collector-kafka.go | 95 + .../zipkin-go-opentracing/collector-scribe.go | 240 + .../zipkin-go-opentracing/collector.go | 82 + .../zipkin-go-opentracing/context.go | 61 + .../zipkin-go-opentracing/debug.go | 78 + .../zipkin-go-opentracing/event.go | 62 + .../zipkin-go-opentracing/flag/flags.go | 39 + .../log-materializers.go | 113 + .../zipkin-go-opentracing/logger.go | 64 + .../zipkin-go-opentracing/observer.go | 52 + .../zipkin-go-opentracing/propagation.go | 68 + .../zipkin-go-opentracing/propagation_ot.go | 269 + .../zipkin-go-opentracing/raw.go | 30 + .../zipkin-go-opentracing/recorder.go | 60 + .../zipkin-go-opentracing/sample.go | 99 + .../zipkin-go-opentracing/span.go | 290 
+ .../gen-go/scribe/GoUnusedProtection__.go | 7 + .../thrift/gen-go/scribe/scribe-consts.go | 24 + .../thrift/gen-go/scribe/scribe.go | 551 + .../gen-go/zipkincore/GoUnusedProtection__.go | 7 + .../gen-go/zipkincore/zipkinCore-consts.go | 45 + .../thrift/gen-go/zipkincore/zipkinCore.go | 1285 + .../zipkin-go-opentracing/tracer.go | 440 + .../zipkin-go-opentracing/types/traceid.go | 38 + .../zipkin-go-opentracing/util.go | 25 + .../zipkin-go-opentracing/wire/carrier.go | 65 + .../zipkin-go-opentracing/wire/gen.go | 6 + .../zipkin-go-opentracing/wire/wire.pb.go | 649 + .../zipkin-go-opentracing/wire/wire.proto | 13 + .../zipkin-go-opentracing/zipkin-endpoint.go | 72 + .../zipkin-go-opentracing/zipkin-recorder.go | 218 + vendor/github.com/philhofer/fwd/LICENSE.md | 7 + vendor/github.com/philhofer/fwd/README.md | 315 + vendor/github.com/philhofer/fwd/reader.go | 383 + vendor/github.com/philhofer/fwd/writer.go | 224 + .../philhofer/fwd/writer_appengine.go | 5 + .../github.com/philhofer/fwd/writer_unsafe.go | 18 + vendor/github.com/pierrec/lz4/.gitignore | 33 + vendor/github.com/pierrec/lz4/.travis.yml | 18 + vendor/github.com/pierrec/lz4/LICENSE | 28 + vendor/github.com/pierrec/lz4/README.md | 24 + vendor/github.com/pierrec/lz4/block.go | 397 + vendor/github.com/pierrec/lz4/debug.go | 23 + vendor/github.com/pierrec/lz4/debug_stub.go | 7 + .../pierrec/lz4/internal/xxh32/xxh32zero.go | 222 + vendor/github.com/pierrec/lz4/lz4.go | 68 + vendor/github.com/pierrec/lz4/lz4_go1.10.go | 29 + .../github.com/pierrec/lz4/lz4_notgo1.10.go | 29 + vendor/github.com/pierrec/lz4/reader.go | 295 + vendor/github.com/pierrec/lz4/writer.go | 267 + .../prometheus/client_golang/LICENSE | 201 + .../prometheus/client_golang/NOTICE | 23 + .../client_golang/prometheus/.gitignore | 1 + .../client_golang/prometheus/README.md | 1 + 
.../client_golang/prometheus/build_info.go | 29 + .../prometheus/build_info_pre_1.12.go | 22 + .../client_golang/prometheus/collector.go | 120 + .../client_golang/prometheus/counter.go | 277 + .../client_golang/prometheus/desc.go | 185 + .../client_golang/prometheus/doc.go | 200 + .../prometheus/expvar_collector.go | 119 + .../client_golang/prometheus/fnv.go | 42 + .../client_golang/prometheus/gauge.go | 286 + .../client_golang/prometheus/go_collector.go | 396 + .../client_golang/prometheus/histogram.go | 586 + .../prometheus/internal/metric.go | 85 + .../client_golang/prometheus/labels.go | 87 + .../client_golang/prometheus/metric.go | 175 + .../client_golang/prometheus/observer.go | 52 + .../prometheus/process_collector.go | 151 + .../prometheus/process_collector_other.go | 65 + .../prometheus/process_collector_windows.go | 112 + .../prometheus/promhttp/delegator.go | 366 + .../client_golang/prometheus/promhttp/http.go | 349 + .../prometheus/promhttp/instrument_client.go | 219 + .../prometheus/promhttp/instrument_server.go | 447 + .../client_golang/prometheus/registry.go | 947 + .../client_golang/prometheus/summary.go | 736 + .../prometheus/testutil/testutil.go | 187 + .../client_golang/prometheus/timer.go | 54 + .../client_golang/prometheus/untyped.go | 42 + .../client_golang/prometheus/value.go | 162 + .../client_golang/prometheus/vec.go | 472 + .../client_golang/prometheus/wrap.go | 200 + .../prometheus/client_model/LICENSE | 201 + .../github.com/prometheus/client_model/NOTICE | 5 + .../prometheus/client_model/go/metrics.pb.go | 629 + vendor/github.com/prometheus/common/LICENSE | 201 + vendor/github.com/prometheus/common/NOTICE | 5 + .../prometheus/common/expfmt/decode.go | 429 + .../prometheus/common/expfmt/encode.go | 88 + .../prometheus/common/expfmt/expfmt.go | 38 + .../prometheus/common/expfmt/fuzz.go | 36 + .../prometheus/common/expfmt/text_create.go | 466 + .../prometheus/common/expfmt/text_parse.go | 764 + 
.../bitbucket.org/ww/goautoneg/README.txt | 67 + .../bitbucket.org/ww/goautoneg/autoneg.go | 162 + .../prometheus/common/model/alert.go | 136 + .../prometheus/common/model/fingerprinting.go | 105 + .../github.com/prometheus/common/model/fnv.go | 42 + .../prometheus/common/model/labels.go | 210 + .../prometheus/common/model/labelset.go | 169 + .../prometheus/common/model/metric.go | 102 + .../prometheus/common/model/model.go | 16 + .../prometheus/common/model/signature.go | 144 + .../prometheus/common/model/silence.go | 106 + .../prometheus/common/model/time.go | 270 + .../prometheus/common/model/value.go | 416 + .../github.com/prometheus/procfs/.gitignore | 1 + .../prometheus/procfs/.golangci.yml | 6 + .../prometheus/procfs/CONTRIBUTING.md | 18 + vendor/github.com/prometheus/procfs/LICENSE | 201 + .../prometheus/procfs/MAINTAINERS.md | 2 + vendor/github.com/prometheus/procfs/Makefile | 29 + .../prometheus/procfs/Makefile.common | 275 + vendor/github.com/prometheus/procfs/NOTICE | 7 + vendor/github.com/prometheus/procfs/README.md | 53 + vendor/github.com/prometheus/procfs/arp.go | 85 + .../github.com/prometheus/procfs/buddyinfo.go | 85 + .../github.com/prometheus/procfs/cpuinfo.go | 166 + vendor/github.com/prometheus/procfs/crypto.go | 131 + vendor/github.com/prometheus/procfs/doc.go | 45 + .../prometheus/procfs/fixtures.ttar | 4550 ++ vendor/github.com/prometheus/procfs/fs.go | 43 + vendor/github.com/prometheus/procfs/go.mod | 6 + vendor/github.com/prometheus/procfs/go.sum | 4 + .../prometheus/procfs/internal/fs/fs.go | 55 + .../prometheus/procfs/internal/util/parse.go | 88 + .../procfs/internal/util/sysreadfile.go | 45 + .../internal/util/sysreadfile_compat.go | 26 + .../procfs/internal/util/valueparser.go | 77 + vendor/github.com/prometheus/procfs/ipvs.go | 239 + vendor/github.com/prometheus/procfs/mdstat.go | 194 + 
.../github.com/prometheus/procfs/mountinfo.go | 178 + .../prometheus/procfs/mountstats.go | 621 + .../github.com/prometheus/procfs/net_dev.go | 206 + .../prometheus/procfs/net_softnet.go | 91 + .../github.com/prometheus/procfs/net_unix.go | 275 + vendor/github.com/prometheus/procfs/proc.go | 311 + .../prometheus/procfs/proc_environ.go | 43 + .../prometheus/procfs/proc_fdinfo.go | 132 + .../github.com/prometheus/procfs/proc_io.go | 65 + .../prometheus/procfs/proc_limits.go | 157 + .../github.com/prometheus/procfs/proc_ns.go | 68 + .../github.com/prometheus/procfs/proc_psi.go | 101 + .../github.com/prometheus/procfs/proc_stat.go | 198 + .../prometheus/procfs/proc_status.go | 167 + .../github.com/prometheus/procfs/schedstat.go | 118 + vendor/github.com/prometheus/procfs/stat.go | 244 + vendor/github.com/prometheus/procfs/ttar | 413 + vendor/github.com/prometheus/procfs/vm.go | 210 + vendor/github.com/prometheus/procfs/xfrm.go | 187 + .../github.com/prometheus/procfs/zoneinfo.go | 196 + .../github.com/rcrowley/go-metrics/.gitignore | 9 + .../rcrowley/go-metrics/.travis.yml | 17 + vendor/github.com/rcrowley/go-metrics/LICENSE | 29 + .../github.com/rcrowley/go-metrics/README.md | 168 + .../github.com/rcrowley/go-metrics/counter.go | 112 + .../github.com/rcrowley/go-metrics/debug.go | 76 + vendor/github.com/rcrowley/go-metrics/ewma.go | 138 + .../github.com/rcrowley/go-metrics/gauge.go | 120 + .../rcrowley/go-metrics/gauge_float64.go | 125 + .../rcrowley/go-metrics/graphite.go | 113 + .../rcrowley/go-metrics/healthcheck.go | 61 + .../rcrowley/go-metrics/histogram.go | 202 + vendor/github.com/rcrowley/go-metrics/json.go | 31 + vendor/github.com/rcrowley/go-metrics/log.go | 80 + .../github.com/rcrowley/go-metrics/memory.md | 285 + .../github.com/rcrowley/go-metrics/meter.go | 
251 + .../github.com/rcrowley/go-metrics/metrics.go | 13 + .../rcrowley/go-metrics/opentsdb.go | 119 + .../rcrowley/go-metrics/registry.go | 363 + .../github.com/rcrowley/go-metrics/runtime.go | 212 + .../rcrowley/go-metrics/runtime_cgo.go | 10 + .../go-metrics/runtime_gccpufraction.go | 9 + .../rcrowley/go-metrics/runtime_no_cgo.go | 7 + .../go-metrics/runtime_no_gccpufraction.go | 9 + .../github.com/rcrowley/go-metrics/sample.go | 616 + .../github.com/rcrowley/go-metrics/syslog.go | 78 + .../github.com/rcrowley/go-metrics/timer.go | 329 + .../rcrowley/go-metrics/validate.sh | 10 + .../github.com/rcrowley/go-metrics/writer.go | 100 + vendor/github.com/spf13/pflag/.gitignore | 2 + vendor/github.com/spf13/pflag/.travis.yml | 21 + vendor/github.com/spf13/pflag/LICENSE | 28 + vendor/github.com/spf13/pflag/README.md | 296 + vendor/github.com/spf13/pflag/bool.go | 94 + vendor/github.com/spf13/pflag/bool_slice.go | 147 + vendor/github.com/spf13/pflag/bytes.go | 209 + vendor/github.com/spf13/pflag/count.go | 96 + vendor/github.com/spf13/pflag/duration.go | 86 + .../github.com/spf13/pflag/duration_slice.go | 128 + vendor/github.com/spf13/pflag/flag.go | 1227 + vendor/github.com/spf13/pflag/float32.go | 88 + vendor/github.com/spf13/pflag/float64.go | 84 + vendor/github.com/spf13/pflag/golangflag.go | 105 + vendor/github.com/spf13/pflag/int.go | 84 + vendor/github.com/spf13/pflag/int16.go | 88 + vendor/github.com/spf13/pflag/int32.go | 88 + vendor/github.com/spf13/pflag/int64.go | 84 + vendor/github.com/spf13/pflag/int8.go | 88 + vendor/github.com/spf13/pflag/int_slice.go | 128 + vendor/github.com/spf13/pflag/ip.go | 94 + vendor/github.com/spf13/pflag/ip_slice.go | 148 + vendor/github.com/spf13/pflag/ipmask.go | 122 + 
vendor/github.com/spf13/pflag/ipnet.go | 98 + vendor/github.com/spf13/pflag/string.go | 80 + vendor/github.com/spf13/pflag/string_array.go | 103 + vendor/github.com/spf13/pflag/string_slice.go | 149 + .../github.com/spf13/pflag/string_to_int.go | 149 + .../spf13/pflag/string_to_string.go | 160 + vendor/github.com/spf13/pflag/uint.go | 88 + vendor/github.com/spf13/pflag/uint16.go | 88 + vendor/github.com/spf13/pflag/uint32.go | 88 + vendor/github.com/spf13/pflag/uint64.go | 88 + vendor/github.com/spf13/pflag/uint8.go | 88 + vendor/github.com/spf13/pflag/uint_slice.go | 126 + vendor/github.com/tinylib/msgp/LICENSE | 8 + .../tinylib/msgp/msgp/advise_linux.go | 24 + .../tinylib/msgp/msgp/advise_other.go | 17 + .../github.com/tinylib/msgp/msgp/circular.go | 39 + vendor/github.com/tinylib/msgp/msgp/defs.go | 142 + vendor/github.com/tinylib/msgp/msgp/edit.go | 242 + vendor/github.com/tinylib/msgp/msgp/elsize.go | 99 + vendor/github.com/tinylib/msgp/msgp/errors.go | 314 + .../github.com/tinylib/msgp/msgp/extension.go | 549 + vendor/github.com/tinylib/msgp/msgp/file.go | 92 + .../github.com/tinylib/msgp/msgp/file_port.go | 47 + .../github.com/tinylib/msgp/msgp/integers.go | 174 + vendor/github.com/tinylib/msgp/msgp/json.go | 542 + .../tinylib/msgp/msgp/json_bytes.go | 363 + vendor/github.com/tinylib/msgp/msgp/number.go | 267 + vendor/github.com/tinylib/msgp/msgp/purego.go | 15 + vendor/github.com/tinylib/msgp/msgp/read.go | 1358 + .../tinylib/msgp/msgp/read_bytes.go | 1197 + vendor/github.com/tinylib/msgp/msgp/size.go | 38 + vendor/github.com/tinylib/msgp/msgp/unsafe.go | 41 + vendor/github.com/tinylib/msgp/msgp/write.go | 845 + .../tinylib/msgp/msgp/write_bytes.go | 411 + vendor/go.etcd.io/etcd/LICENSE | 202 + vendor/go.etcd.io/etcd/NOTICE | 5 + 
vendor/go.etcd.io/etcd/auth/authpb/auth.pb.go | 977 + vendor/go.etcd.io/etcd/auth/authpb/auth.proto | 42 + vendor/go.etcd.io/etcd/clientv3/README.md | 85 + vendor/go.etcd.io/etcd/clientv3/auth.go | 242 + .../etcd/clientv3/balancer/balancer.go | 293 + .../balancer/connectivity/connectivity.go | 93 + .../etcd/clientv3/balancer/picker/doc.go | 16 + .../etcd/clientv3/balancer/picker/err.go | 39 + .../etcd/clientv3/balancer/picker/picker.go | 91 + .../balancer/picker/roundrobin_balanced.go | 95 + .../balancer/resolver/endpoint/endpoint.go | 240 + .../etcd/clientv3/balancer/utils.go | 68 + vendor/go.etcd.io/etcd/clientv3/client.go | 665 + vendor/go.etcd.io/etcd/clientv3/cluster.go | 141 + vendor/go.etcd.io/etcd/clientv3/compact_op.go | 51 + vendor/go.etcd.io/etcd/clientv3/compare.go | 140 + vendor/go.etcd.io/etcd/clientv3/config.go | 88 + .../etcd/clientv3/credentials/credentials.go | 155 + vendor/go.etcd.io/etcd/clientv3/doc.go | 106 + vendor/go.etcd.io/etcd/clientv3/kv.go | 177 + vendor/go.etcd.io/etcd/clientv3/lease.go | 596 + vendor/go.etcd.io/etcd/clientv3/logger.go | 101 + .../go.etcd.io/etcd/clientv3/maintenance.go | 230 + vendor/go.etcd.io/etcd/clientv3/op.go | 560 + vendor/go.etcd.io/etcd/clientv3/options.go | 65 + vendor/go.etcd.io/etcd/clientv3/retry.go | 298 + .../etcd/clientv3/retry_interceptor.go | 389 + vendor/go.etcd.io/etcd/clientv3/sort.go | 37 + vendor/go.etcd.io/etcd/clientv3/txn.go | 151 + vendor/go.etcd.io/etcd/clientv3/utils.go | 49 + vendor/go.etcd.io/etcd/clientv3/watch.go | 987 + .../etcd/etcdserver/api/v3rpc/rpctypes/doc.go | 16 + .../etcdserver/api/v3rpc/rpctypes/error.go | 235 + .../etcd/etcdserver/api/v3rpc/rpctypes/md.go | 20 + .../api/v3rpc/rpctypes/metadatafields.go | 20 + .../etcdserver/etcdserverpb/etcdserver.pb.go | 1041 + .../etcdserver/etcdserverpb/etcdserver.proto | 34 + .../etcdserverpb/raft_internal.pb.go | 2127 + .../etcdserverpb/raft_internal.proto | 75 + .../etcdserverpb/raft_internal_stringer.go | 183 + 
.../etcd/etcdserver/etcdserverpb/rpc.pb.go | 20086 ++++++ .../etcd/etcdserver/etcdserverpb/rpc.proto | 1146 + vendor/go.etcd.io/etcd/mvcc/mvccpb/kv.pb.go | 718 + vendor/go.etcd.io/etcd/mvcc/mvccpb/kv.proto | 49 + .../etcd/pkg/logutil/discard_logger.go | 46 + vendor/go.etcd.io/etcd/pkg/logutil/doc.go | 16 + .../go.etcd.io/etcd/pkg/logutil/log_level.go | 70 + vendor/go.etcd.io/etcd/pkg/logutil/logger.go | 64 + .../etcd/pkg/logutil/merge_logger.go | 194 + .../etcd/pkg/logutil/package_logger.go | 60 + vendor/go.etcd.io/etcd/pkg/logutil/zap.go | 91 + .../go.etcd.io/etcd/pkg/logutil/zap_grpc.go | 111 + .../etcd/pkg/logutil/zap_journal.go | 92 + .../go.etcd.io/etcd/pkg/logutil/zap_raft.go | 102 + vendor/go.etcd.io/etcd/pkg/systemd/doc.go | 16 + vendor/go.etcd.io/etcd/pkg/systemd/journal.go | 29 + vendor/go.etcd.io/etcd/pkg/types/doc.go | 17 + vendor/go.etcd.io/etcd/pkg/types/id.go | 39 + vendor/go.etcd.io/etcd/pkg/types/set.go | 195 + vendor/go.etcd.io/etcd/pkg/types/slice.go | 22 + vendor/go.etcd.io/etcd/pkg/types/urls.go | 82 + vendor/go.etcd.io/etcd/pkg/types/urlsmap.go | 107 + vendor/go.etcd.io/etcd/raft/OWNERS | 19 + vendor/go.etcd.io/etcd/raft/README.md | 197 + vendor/go.etcd.io/etcd/raft/bootstrap.go | 80 + .../etcd/raft/confchange/confchange.go | 425 + .../etcd/raft/confchange/restore.go | 155 + vendor/go.etcd.io/etcd/raft/design.md | 57 + vendor/go.etcd.io/etcd/raft/doc.go | 300 + vendor/go.etcd.io/etcd/raft/log.go | 372 + vendor/go.etcd.io/etcd/raft/log_unstable.go | 157 + vendor/go.etcd.io/etcd/raft/logger.go | 132 + vendor/go.etcd.io/etcd/raft/node.go | 584 + vendor/go.etcd.io/etcd/raft/quorum/joint.go | 75 + .../go.etcd.io/etcd/raft/quorum/majority.go | 210 + vendor/go.etcd.io/etcd/raft/quorum/quorum.go | 58 + .../etcd/raft/quorum/voteresult_string.go | 26 + vendor/go.etcd.io/etcd/raft/raft.go | 1656 + .../go.etcd.io/etcd/raft/raftpb/confchange.go | 170 + .../go.etcd.io/etcd/raft/raftpb/confstate.go | 45 + vendor/go.etcd.io/etcd/raft/raftpb/raft.pb.go | 2646 
+ vendor/go.etcd.io/etcd/raft/raftpb/raft.proto | 177 + vendor/go.etcd.io/etcd/raft/rawnode.go | 239 + vendor/go.etcd.io/etcd/raft/read_only.go | 121 + vendor/go.etcd.io/etcd/raft/status.go | 106 + vendor/go.etcd.io/etcd/raft/storage.go | 273 + .../go.etcd.io/etcd/raft/tracker/inflights.go | 132 + .../go.etcd.io/etcd/raft/tracker/progress.go | 259 + vendor/go.etcd.io/etcd/raft/tracker/state.go | 42 + .../go.etcd.io/etcd/raft/tracker/tracker.go | 288 + vendor/go.etcd.io/etcd/raft/util.go | 233 + vendor/go.opencensus.io/.gitignore | 9 + vendor/go.opencensus.io/.travis.yml | 17 + vendor/go.opencensus.io/AUTHORS | 1 + vendor/go.opencensus.io/CONTRIBUTING.md | 63 + vendor/go.opencensus.io/Gopkg.lock | 231 + vendor/go.opencensus.io/Gopkg.toml | 36 + vendor/go.opencensus.io/LICENSE | 202 + vendor/go.opencensus.io/Makefile | 96 + vendor/go.opencensus.io/README.md | 263 + vendor/go.opencensus.io/appveyor.yml | 25 + vendor/go.opencensus.io/go.mod | 12 + vendor/go.opencensus.io/go.sum | 61 + vendor/go.opencensus.io/internal/internal.go | 37 + vendor/go.opencensus.io/internal/sanitize.go | 50 + .../internal/tagencoding/tagencoding.go | 75 + .../internal/traceinternals.go | 53 + .../go.opencensus.io/metric/metricdata/doc.go | 19 + .../metric/metricdata/exemplar.go | 38 + .../metric/metricdata/label.go | 35 + .../metric/metricdata/metric.go | 46 + .../metric/metricdata/point.go | 193 + .../metric/metricdata/type_string.go | 16 + .../metric/metricdata/unit.go | 27 + .../metric/metricproducer/manager.go | 78 + .../metric/metricproducer/producer.go | 28 + vendor/go.opencensus.io/opencensus.go | 21 + .../go.opencensus.io/plugin/ochttp/client.go | 117 + .../plugin/ochttp/client_stats.go | 143 + vendor/go.opencensus.io/plugin/ochttp/doc.go | 19 + .../plugin/ochttp/propagation/b3/b3.go | 123 + .../go.opencensus.io/plugin/ochttp/route.go | 61 + .../go.opencensus.io/plugin/ochttp/server.go | 449 + .../ochttp/span_annotating_client_trace.go | 169 + 
.../go.opencensus.io/plugin/ochttp/stats.go | 292 + .../go.opencensus.io/plugin/ochttp/trace.go | 239 + .../plugin/ochttp/wrapped_body.go | 44 + vendor/go.opencensus.io/resource/resource.go | 164 + vendor/go.opencensus.io/stats/doc.go | 69 + .../go.opencensus.io/stats/internal/record.go | 25 + vendor/go.opencensus.io/stats/measure.go | 109 + .../go.opencensus.io/stats/measure_float64.go | 55 + .../go.opencensus.io/stats/measure_int64.go | 55 + vendor/go.opencensus.io/stats/record.go | 117 + vendor/go.opencensus.io/stats/units.go | 25 + .../stats/view/aggregation.go | 120 + .../stats/view/aggregation_data.go | 293 + .../go.opencensus.io/stats/view/collector.go | 86 + vendor/go.opencensus.io/stats/view/doc.go | 47 + vendor/go.opencensus.io/stats/view/export.go | 58 + vendor/go.opencensus.io/stats/view/view.go | 221 + .../stats/view/view_to_metric.go | 140 + vendor/go.opencensus.io/stats/view/worker.go | 281 + .../stats/view/worker_commands.go | 186 + vendor/go.opencensus.io/tag/context.go | 43 + vendor/go.opencensus.io/tag/doc.go | 26 + vendor/go.opencensus.io/tag/key.go | 45 + vendor/go.opencensus.io/tag/map.go | 229 + vendor/go.opencensus.io/tag/map_codec.go | 239 + vendor/go.opencensus.io/tag/metadata.go | 52 + vendor/go.opencensus.io/tag/profile_19.go | 31 + vendor/go.opencensus.io/tag/profile_not19.go | 23 + vendor/go.opencensus.io/tag/validate.go | 56 + vendor/go.opencensus.io/trace/basetypes.go | 119 + vendor/go.opencensus.io/trace/config.go | 86 + vendor/go.opencensus.io/trace/doc.go | 53 + vendor/go.opencensus.io/trace/evictedqueue.go | 38 + vendor/go.opencensus.io/trace/export.go | 97 + .../trace/internal/internal.go | 22 + vendor/go.opencensus.io/trace/lrumap.go | 37 + .../trace/propagation/propagation.go | 108 + vendor/go.opencensus.io/trace/sampling.go | 75 + vendor/go.opencensus.io/trace/spanbucket.go | 130 + vendor/go.opencensus.io/trace/spanstore.go | 306 + vendor/go.opencensus.io/trace/status_codes.go | 37 + vendor/go.opencensus.io/trace/trace.go | 
598 + vendor/go.opencensus.io/trace/trace_go11.go | 32 + .../go.opencensus.io/trace/trace_nongo11.go | 25 + .../trace/tracestate/tracestate.go | 147 + vendor/go.uber.org/atomic/.codecov.yml | 15 + vendor/go.uber.org/atomic/.gitignore | 11 + vendor/go.uber.org/atomic/.travis.yml | 23 + vendor/go.uber.org/atomic/LICENSE.txt | 19 + vendor/go.uber.org/atomic/Makefile | 64 + vendor/go.uber.org/atomic/README.md | 36 + vendor/go.uber.org/atomic/atomic.go | 351 + vendor/go.uber.org/atomic/glide.lock | 17 + vendor/go.uber.org/atomic/glide.yaml | 6 + vendor/go.uber.org/atomic/string.go | 49 + vendor/go.uber.org/multierr/.codecov.yml | 15 + vendor/go.uber.org/multierr/.gitignore | 1 + vendor/go.uber.org/multierr/.travis.yml | 33 + vendor/go.uber.org/multierr/CHANGELOG.md | 28 + vendor/go.uber.org/multierr/LICENSE.txt | 19 + vendor/go.uber.org/multierr/Makefile | 74 + vendor/go.uber.org/multierr/README.md | 23 + vendor/go.uber.org/multierr/error.go | 401 + vendor/go.uber.org/multierr/glide.lock | 19 + vendor/go.uber.org/multierr/glide.yaml | 8 + vendor/go.uber.org/zap/.codecov.yml | 17 + vendor/go.uber.org/zap/.gitignore | 28 + vendor/go.uber.org/zap/.readme.tmpl | 108 + vendor/go.uber.org/zap/.travis.yml | 21 + vendor/go.uber.org/zap/CHANGELOG.md | 327 + vendor/go.uber.org/zap/CODE_OF_CONDUCT.md | 75 + vendor/go.uber.org/zap/CONTRIBUTING.md | 81 + vendor/go.uber.org/zap/FAQ.md | 155 + vendor/go.uber.org/zap/LICENSE.txt | 19 + vendor/go.uber.org/zap/Makefile | 76 + vendor/go.uber.org/zap/README.md | 136 + vendor/go.uber.org/zap/array.go | 320 + vendor/go.uber.org/zap/buffer/buffer.go | 115 + vendor/go.uber.org/zap/buffer/pool.go | 49 + vendor/go.uber.org/zap/check_license.sh | 17 + vendor/go.uber.org/zap/config.go | 243 + vendor/go.uber.org/zap/doc.go | 113 + vendor/go.uber.org/zap/encoder.go | 75 + vendor/go.uber.org/zap/error.go | 80 + vendor/go.uber.org/zap/field.go | 310 + vendor/go.uber.org/zap/flag.go | 39 + vendor/go.uber.org/zap/glide.lock | 76 + 
vendor/go.uber.org/zap/glide.yaml | 35 + vendor/go.uber.org/zap/global.go | 168 + vendor/go.uber.org/zap/global_go112.go | 26 + vendor/go.uber.org/zap/global_prego112.go | 26 + vendor/go.uber.org/zap/http_handler.go | 81 + .../zap/internal/bufferpool/bufferpool.go | 31 + .../go.uber.org/zap/internal/color/color.go | 44 + vendor/go.uber.org/zap/internal/exit/exit.go | 64 + vendor/go.uber.org/zap/level.go | 132 + vendor/go.uber.org/zap/logger.go | 305 + vendor/go.uber.org/zap/options.go | 109 + vendor/go.uber.org/zap/sink.go | 161 + vendor/go.uber.org/zap/stacktrace.go | 126 + vendor/go.uber.org/zap/sugar.go | 304 + vendor/go.uber.org/zap/time.go | 27 + vendor/go.uber.org/zap/writer.go | 99 + .../zap/zapcore/console_encoder.go | 147 + vendor/go.uber.org/zap/zapcore/core.go | 113 + vendor/go.uber.org/zap/zapcore/doc.go | 24 + vendor/go.uber.org/zap/zapcore/encoder.go | 348 + vendor/go.uber.org/zap/zapcore/entry.go | 257 + vendor/go.uber.org/zap/zapcore/error.go | 120 + vendor/go.uber.org/zap/zapcore/field.go | 212 + vendor/go.uber.org/zap/zapcore/hook.go | 68 + .../go.uber.org/zap/zapcore/json_encoder.go | 505 + vendor/go.uber.org/zap/zapcore/level.go | 175 + .../go.uber.org/zap/zapcore/level_strings.go | 46 + vendor/go.uber.org/zap/zapcore/marshaler.go | 53 + .../go.uber.org/zap/zapcore/memory_encoder.go | 179 + vendor/go.uber.org/zap/zapcore/sampler.go | 134 + vendor/go.uber.org/zap/zapcore/tee.go | 81 + .../go.uber.org/zap/zapcore/write_syncer.go | 123 + vendor/golang.org/x/crypto/AUTHORS | 3 + vendor/golang.org/x/crypto/CONTRIBUTORS | 3 + vendor/golang.org/x/crypto/LICENSE | 27 + vendor/golang.org/x/crypto/PATENTS | 22 + vendor/golang.org/x/crypto/ed25519/ed25519.go | 222 + .../x/crypto/ed25519/ed25519_go113.go | 73 + .../ed25519/internal/edwards25519/const.go | 1422 + .../internal/edwards25519/edwards25519.go | 1793 + .../golang.org/x/crypto/pkcs12/bmp-string.go | 50 + vendor/golang.org/x/crypto/pkcs12/crypto.go | 131 + vendor/golang.org/x/crypto/pkcs12/errors.go 
| 23 + .../x/crypto/pkcs12/internal/rc2/rc2.go | 271 + vendor/golang.org/x/crypto/pkcs12/mac.go | 45 + vendor/golang.org/x/crypto/pkcs12/pbkdf.go | 170 + vendor/golang.org/x/crypto/pkcs12/pkcs12.go | 350 + vendor/golang.org/x/crypto/pkcs12/safebags.go | 57 + .../x/crypto/ssh/terminal/terminal.go | 966 + .../golang.org/x/crypto/ssh/terminal/util.go | 114 + .../x/crypto/ssh/terminal/util_aix.go | 12 + .../x/crypto/ssh/terminal/util_bsd.go | 12 + .../x/crypto/ssh/terminal/util_linux.go | 10 + .../x/crypto/ssh/terminal/util_plan9.go | 58 + .../x/crypto/ssh/terminal/util_solaris.go | 124 + .../x/crypto/ssh/terminal/util_windows.go | 105 + vendor/golang.org/x/net/AUTHORS | 3 + vendor/golang.org/x/net/CONTRIBUTORS | 3 + vendor/golang.org/x/net/LICENSE | 27 + vendor/golang.org/x/net/PATENTS | 22 + vendor/golang.org/x/net/bpf/asm.go | 41 + vendor/golang.org/x/net/bpf/constants.go | 222 + vendor/golang.org/x/net/bpf/doc.go | 82 + vendor/golang.org/x/net/bpf/instructions.go | 726 + vendor/golang.org/x/net/bpf/setter.go | 10 + vendor/golang.org/x/net/bpf/vm.go | 150 + .../golang.org/x/net/bpf/vm_instructions.go | 182 + vendor/golang.org/x/net/context/context.go | 56 + .../x/net/context/ctxhttp/ctxhttp.go | 71 + vendor/golang.org/x/net/context/go17.go | 72 + vendor/golang.org/x/net/context/go19.go | 20 + vendor/golang.org/x/net/context/pre_go17.go | 300 + vendor/golang.org/x/net/context/pre_go19.go | 109 + vendor/golang.org/x/net/http/httpguts/guts.go | 50 + .../golang.org/x/net/http/httpguts/httplex.go | 346 + vendor/golang.org/x/net/http2/.gitignore | 2 + vendor/golang.org/x/net/http2/Dockerfile | 51 + vendor/golang.org/x/net/http2/Makefile | 3 + vendor/golang.org/x/net/http2/README | 20 + vendor/golang.org/x/net/http2/ciphers.go | 641 + .../x/net/http2/client_conn_pool.go | 282 + vendor/golang.org/x/net/http2/databuffer.go | 146 + vendor/golang.org/x/net/http2/errors.go | 133 + vendor/golang.org/x/net/http2/flow.go | 50 + vendor/golang.org/x/net/http2/frame.go | 1614 + 
vendor/golang.org/x/net/http2/go111.go | 29 + vendor/golang.org/x/net/http2/gotrack.go | 170 + vendor/golang.org/x/net/http2/headermap.go | 88 + vendor/golang.org/x/net/http2/hpack/encode.go | 240 + vendor/golang.org/x/net/http2/hpack/hpack.go | 504 + .../golang.org/x/net/http2/hpack/huffman.go | 222 + vendor/golang.org/x/net/http2/hpack/tables.go | 479 + vendor/golang.org/x/net/http2/http2.go | 384 + vendor/golang.org/x/net/http2/not_go111.go | 20 + vendor/golang.org/x/net/http2/pipe.go | 168 + vendor/golang.org/x/net/http2/server.go | 2965 + vendor/golang.org/x/net/http2/transport.go | 2670 + vendor/golang.org/x/net/http2/write.go | 365 + vendor/golang.org/x/net/http2/writesched.go | 248 + .../x/net/http2/writesched_priority.go | 452 + .../x/net/http2/writesched_random.go | 77 + vendor/golang.org/x/net/idna/idna10.0.0.go | 734 + vendor/golang.org/x/net/idna/idna9.0.0.go | 682 + vendor/golang.org/x/net/idna/punycode.go | 203 + vendor/golang.org/x/net/idna/tables10.0.0.go | 4559 ++ vendor/golang.org/x/net/idna/tables11.0.0.go | 4653 ++ vendor/golang.org/x/net/idna/tables12.00.go | 4733 ++ vendor/golang.org/x/net/idna/tables9.0.0.go | 4486 ++ vendor/golang.org/x/net/idna/trie.go | 72 + vendor/golang.org/x/net/idna/trieval.go | 119 + .../golang.org/x/net/internal/iana/const.go | 223 + .../x/net/internal/socket/cmsghdr.go | 11 + .../x/net/internal/socket/cmsghdr_bsd.go | 13 + .../internal/socket/cmsghdr_linux_32bit.go | 14 + .../internal/socket/cmsghdr_linux_64bit.go | 14 + .../internal/socket/cmsghdr_solaris_64bit.go | 14 + .../x/net/internal/socket/cmsghdr_stub.go | 17 + .../golang.org/x/net/internal/socket/empty.s | 7 + .../x/net/internal/socket/error_unix.go | 31 + .../x/net/internal/socket/error_windows.go | 26 + .../x/net/internal/socket/iovec_32bit.go | 19 + .../x/net/internal/socket/iovec_64bit.go | 19 + .../internal/socket/iovec_solaris_64bit.go | 19 + .../x/net/internal/socket/iovec_stub.go | 11 + .../x/net/internal/socket/mmsghdr_stub.go | 21 + 
.../x/net/internal/socket/mmsghdr_unix.go | 42 + .../x/net/internal/socket/msghdr_bsd.go | 39 + .../x/net/internal/socket/msghdr_bsdvar.go | 16 + .../x/net/internal/socket/msghdr_linux.go | 36 + .../net/internal/socket/msghdr_linux_32bit.go | 24 + .../net/internal/socket/msghdr_linux_64bit.go | 24 + .../x/net/internal/socket/msghdr_openbsd.go | 14 + .../internal/socket/msghdr_solaris_64bit.go | 36 + .../x/net/internal/socket/msghdr_stub.go | 14 + .../x/net/internal/socket/norace.go | 12 + .../golang.org/x/net/internal/socket/race.go | 37 + .../x/net/internal/socket/rawconn.go | 64 + .../x/net/internal/socket/rawconn_mmsg.go | 79 + .../x/net/internal/socket/rawconn_msg.go | 78 + .../x/net/internal/socket/rawconn_nommsg.go | 15 + .../x/net/internal/socket/rawconn_nomsg.go | 15 + .../x/net/internal/socket/socket.go | 288 + .../golang.org/x/net/internal/socket/sys.go | 33 + .../x/net/internal/socket/sys_bsd.go | 15 + .../x/net/internal/socket/sys_bsdvar.go | 23 + .../x/net/internal/socket/sys_const_unix.go | 17 + .../x/net/internal/socket/sys_darwin.go | 7 + .../x/net/internal/socket/sys_dragonfly.go | 32 + .../net/internal/socket/sys_go1_11_darwin.go | 33 + .../x/net/internal/socket/sys_linkname.go | 42 + .../x/net/internal/socket/sys_linux.go | 27 + .../x/net/internal/socket/sys_linux_386.go | 55 + .../x/net/internal/socket/sys_linux_386.s | 11 + .../x/net/internal/socket/sys_linux_amd64.go | 10 + .../x/net/internal/socket/sys_linux_arm.go | 10 + .../x/net/internal/socket/sys_linux_arm64.go | 10 + .../x/net/internal/socket/sys_linux_mips.go | 10 + .../x/net/internal/socket/sys_linux_mips64.go | 10 + .../net/internal/socket/sys_linux_mips64le.go | 10 + .../x/net/internal/socket/sys_linux_mipsle.go | 10 + .../x/net/internal/socket/sys_linux_ppc64.go | 10 + .../net/internal/socket/sys_linux_ppc64le.go | 10 + .../net/internal/socket/sys_linux_riscv64.go | 12 + .../x/net/internal/socket/sys_linux_s390x.go | 55 + .../x/net/internal/socket/sys_linux_s390x.s | 11 + 
.../x/net/internal/socket/sys_netbsd.go | 25 + .../x/net/internal/socket/sys_posix.go | 183 + .../x/net/internal/socket/sys_solaris.go | 70 + .../x/net/internal/socket/sys_solaris_amd64.s | 11 + .../x/net/internal/socket/sys_stub.go | 63 + .../x/net/internal/socket/sys_unix.go | 33 + .../x/net/internal/socket/sys_windows.go | 71 + .../x/net/internal/socket/zsys_aix_ppc64.go | 60 + .../x/net/internal/socket/zsys_darwin_386.go | 51 + .../net/internal/socket/zsys_darwin_amd64.go | 53 + .../x/net/internal/socket/zsys_darwin_arm.go | 51 + .../net/internal/socket/zsys_darwin_arm64.go | 53 + .../internal/socket/zsys_dragonfly_amd64.go | 53 + .../x/net/internal/socket/zsys_freebsd_386.go | 51 + .../net/internal/socket/zsys_freebsd_amd64.go | 53 + .../x/net/internal/socket/zsys_freebsd_arm.go | 51 + .../net/internal/socket/zsys_freebsd_arm64.go | 53 + .../x/net/internal/socket/zsys_linux_386.go | 54 + .../x/net/internal/socket/zsys_linux_amd64.go | 57 + .../x/net/internal/socket/zsys_linux_arm.go | 55 + .../x/net/internal/socket/zsys_linux_arm64.go | 58 + .../x/net/internal/socket/zsys_linux_mips.go | 55 + .../net/internal/socket/zsys_linux_mips64.go | 58 + .../internal/socket/zsys_linux_mips64le.go | 58 + .../net/internal/socket/zsys_linux_mipsle.go | 55 + .../x/net/internal/socket/zsys_linux_ppc64.go | 58 + .../net/internal/socket/zsys_linux_ppc64le.go | 58 + .../net/internal/socket/zsys_linux_riscv64.go | 59 + .../x/net/internal/socket/zsys_linux_s390x.go | 58 + .../x/net/internal/socket/zsys_netbsd_386.go | 57 + .../net/internal/socket/zsys_netbsd_amd64.go | 60 + .../x/net/internal/socket/zsys_netbsd_arm.go | 57 + .../net/internal/socket/zsys_netbsd_arm64.go | 59 + .../x/net/internal/socket/zsys_openbsd_386.go | 51 + .../net/internal/socket/zsys_openbsd_amd64.go | 53 + .../x/net/internal/socket/zsys_openbsd_arm.go | 51 + .../net/internal/socket/zsys_openbsd_arm64.go | 53 + .../net/internal/socket/zsys_solaris_amd64.go | 52 + .../x/net/internal/timeseries/timeseries.go | 
525 + vendor/golang.org/x/net/ipv4/batch.go | 194 + vendor/golang.org/x/net/ipv4/control.go | 144 + vendor/golang.org/x/net/ipv4/control_bsd.go | 41 + .../golang.org/x/net/ipv4/control_pktinfo.go | 39 + vendor/golang.org/x/net/ipv4/control_stub.go | 13 + vendor/golang.org/x/net/ipv4/control_unix.go | 73 + .../golang.org/x/net/ipv4/control_windows.go | 12 + vendor/golang.org/x/net/ipv4/dgramopt.go | 264 + vendor/golang.org/x/net/ipv4/doc.go | 244 + vendor/golang.org/x/net/ipv4/endpoint.go | 186 + vendor/golang.org/x/net/ipv4/genericopt.go | 55 + vendor/golang.org/x/net/ipv4/header.go | 173 + vendor/golang.org/x/net/ipv4/helper.go | 80 + vendor/golang.org/x/net/ipv4/iana.go | 38 + vendor/golang.org/x/net/ipv4/icmp.go | 57 + vendor/golang.org/x/net/ipv4/icmp_linux.go | 25 + vendor/golang.org/x/net/ipv4/icmp_stub.go | 25 + vendor/golang.org/x/net/ipv4/packet.go | 117 + vendor/golang.org/x/net/ipv4/payload.go | 23 + vendor/golang.org/x/net/ipv4/payload_cmsg.go | 84 + .../golang.org/x/net/ipv4/payload_nocmsg.go | 39 + vendor/golang.org/x/net/ipv4/sockopt.go | 44 + vendor/golang.org/x/net/ipv4/sockopt_posix.go | 71 + vendor/golang.org/x/net/ipv4/sockopt_stub.go | 42 + vendor/golang.org/x/net/ipv4/sys_aix.go | 38 + vendor/golang.org/x/net/ipv4/sys_asmreq.go | 119 + .../golang.org/x/net/ipv4/sys_asmreq_stub.go | 25 + vendor/golang.org/x/net/ipv4/sys_asmreqn.go | 42 + .../golang.org/x/net/ipv4/sys_asmreqn_stub.go | 21 + vendor/golang.org/x/net/ipv4/sys_bpf.go | 24 + vendor/golang.org/x/net/ipv4/sys_bpf_stub.go | 16 + vendor/golang.org/x/net/ipv4/sys_bsd.go | 37 + vendor/golang.org/x/net/ipv4/sys_darwin.go | 65 + vendor/golang.org/x/net/ipv4/sys_dragonfly.go | 35 + vendor/golang.org/x/net/ipv4/sys_freebsd.go | 76 + vendor/golang.org/x/net/ipv4/sys_linux.go | 60 + vendor/golang.org/x/net/ipv4/sys_solaris.go | 57 + vendor/golang.org/x/net/ipv4/sys_ssmreq.go | 52 + .../golang.org/x/net/ipv4/sys_ssmreq_stub.go | 21 + vendor/golang.org/x/net/ipv4/sys_stub.go | 13 + 
vendor/golang.org/x/net/ipv4/sys_windows.go | 67 + .../golang.org/x/net/ipv4/zsys_aix_ppc64.go | 33 + vendor/golang.org/x/net/ipv4/zsys_darwin.go | 99 + .../golang.org/x/net/ipv4/zsys_dragonfly.go | 31 + .../golang.org/x/net/ipv4/zsys_freebsd_386.go | 93 + .../x/net/ipv4/zsys_freebsd_amd64.go | 95 + .../golang.org/x/net/ipv4/zsys_freebsd_arm.go | 95 + .../x/net/ipv4/zsys_freebsd_arm64.go | 93 + .../golang.org/x/net/ipv4/zsys_linux_386.go | 130 + .../golang.org/x/net/ipv4/zsys_linux_amd64.go | 132 + .../golang.org/x/net/ipv4/zsys_linux_arm.go | 130 + .../golang.org/x/net/ipv4/zsys_linux_arm64.go | 132 + .../golang.org/x/net/ipv4/zsys_linux_mips.go | 130 + .../x/net/ipv4/zsys_linux_mips64.go | 132 + .../x/net/ipv4/zsys_linux_mips64le.go | 132 + .../x/net/ipv4/zsys_linux_mipsle.go | 130 + .../golang.org/x/net/ipv4/zsys_linux_ppc.go | 130 + .../golang.org/x/net/ipv4/zsys_linux_ppc64.go | 132 + .../x/net/ipv4/zsys_linux_ppc64le.go | 132 + .../x/net/ipv4/zsys_linux_riscv64.go | 134 + .../golang.org/x/net/ipv4/zsys_linux_s390x.go | 132 + vendor/golang.org/x/net/ipv4/zsys_netbsd.go | 30 + vendor/golang.org/x/net/ipv4/zsys_openbsd.go | 30 + vendor/golang.org/x/net/ipv4/zsys_solaris.go | 100 + vendor/golang.org/x/net/ipv6/batch.go | 116 + vendor/golang.org/x/net/ipv6/control.go | 187 + .../x/net/ipv6/control_rfc2292_unix.go | 48 + .../x/net/ipv6/control_rfc3542_unix.go | 94 + vendor/golang.org/x/net/ipv6/control_stub.go | 13 + vendor/golang.org/x/net/ipv6/control_unix.go | 55 + .../golang.org/x/net/ipv6/control_windows.go | 12 + vendor/golang.org/x/net/ipv6/dgramopt.go | 301 + vendor/golang.org/x/net/ipv6/doc.go | 243 + vendor/golang.org/x/net/ipv6/endpoint.go | 127 + vendor/golang.org/x/net/ipv6/genericopt.go | 56 + vendor/golang.org/x/net/ipv6/header.go | 55 + vendor/golang.org/x/net/ipv6/helper.go | 59 + vendor/golang.org/x/net/ipv6/iana.go | 86 + vendor/golang.org/x/net/ipv6/icmp.go | 60 + vendor/golang.org/x/net/ipv6/icmp_bsd.go | 29 + 
vendor/golang.org/x/net/ipv6/icmp_linux.go | 27 + vendor/golang.org/x/net/ipv6/icmp_solaris.go | 27 + vendor/golang.org/x/net/ipv6/icmp_stub.go | 23 + vendor/golang.org/x/net/ipv6/icmp_windows.go | 22 + vendor/golang.org/x/net/ipv6/payload.go | 23 + vendor/golang.org/x/net/ipv6/payload_cmsg.go | 70 + .../golang.org/x/net/ipv6/payload_nocmsg.go | 38 + vendor/golang.org/x/net/ipv6/sockopt.go | 43 + vendor/golang.org/x/net/ipv6/sockopt_posix.go | 89 + vendor/golang.org/x/net/ipv6/sockopt_stub.go | 46 + vendor/golang.org/x/net/ipv6/sys_aix.go | 77 + vendor/golang.org/x/net/ipv6/sys_asmreq.go | 24 + .../golang.org/x/net/ipv6/sys_asmreq_stub.go | 17 + vendor/golang.org/x/net/ipv6/sys_bpf.go | 24 + vendor/golang.org/x/net/ipv6/sys_bpf_stub.go | 16 + vendor/golang.org/x/net/ipv6/sys_bsd.go | 57 + vendor/golang.org/x/net/ipv6/sys_darwin.go | 78 + vendor/golang.org/x/net/ipv6/sys_freebsd.go | 92 + vendor/golang.org/x/net/ipv6/sys_linux.go | 75 + vendor/golang.org/x/net/ipv6/sys_solaris.go | 74 + vendor/golang.org/x/net/ipv6/sys_ssmreq.go | 54 + .../golang.org/x/net/ipv6/sys_ssmreq_stub.go | 21 + vendor/golang.org/x/net/ipv6/sys_stub.go | 13 + vendor/golang.org/x/net/ipv6/sys_windows.go | 75 + .../golang.org/x/net/ipv6/zsys_aix_ppc64.go | 103 + vendor/golang.org/x/net/ipv6/zsys_darwin.go | 131 + .../golang.org/x/net/ipv6/zsys_dragonfly.go | 88 + .../golang.org/x/net/ipv6/zsys_freebsd_386.go | 122 + .../x/net/ipv6/zsys_freebsd_amd64.go | 124 + .../golang.org/x/net/ipv6/zsys_freebsd_arm.go | 124 + .../x/net/ipv6/zsys_freebsd_arm64.go | 122 + .../golang.org/x/net/ipv6/zsys_linux_386.go | 152 + .../golang.org/x/net/ipv6/zsys_linux_amd64.go | 154 + .../golang.org/x/net/ipv6/zsys_linux_arm.go | 152 + .../golang.org/x/net/ipv6/zsys_linux_arm64.go | 154 + .../golang.org/x/net/ipv6/zsys_linux_mips.go | 152 + .../x/net/ipv6/zsys_linux_mips64.go | 154 + .../x/net/ipv6/zsys_linux_mips64le.go | 154 + .../x/net/ipv6/zsys_linux_mipsle.go | 152 + .../golang.org/x/net/ipv6/zsys_linux_ppc.go | 
152 + .../golang.org/x/net/ipv6/zsys_linux_ppc64.go | 154 + .../x/net/ipv6/zsys_linux_ppc64le.go | 154 + .../x/net/ipv6/zsys_linux_riscv64.go | 156 + .../golang.org/x/net/ipv6/zsys_linux_s390x.go | 154 + vendor/golang.org/x/net/ipv6/zsys_netbsd.go | 84 + vendor/golang.org/x/net/ipv6/zsys_openbsd.go | 93 + vendor/golang.org/x/net/ipv6/zsys_solaris.go | 131 + vendor/golang.org/x/net/trace/events.go | 532 + vendor/golang.org/x/net/trace/histogram.go | 365 + vendor/golang.org/x/net/trace/trace.go | 1130 + vendor/golang.org/x/oauth2/.travis.yml | 13 + vendor/golang.org/x/oauth2/AUTHORS | 3 + vendor/golang.org/x/oauth2/CONTRIBUTING.md | 26 + vendor/golang.org/x/oauth2/CONTRIBUTORS | 3 + vendor/golang.org/x/oauth2/LICENSE | 27 + vendor/golang.org/x/oauth2/README.md | 35 + vendor/golang.org/x/oauth2/go.mod | 10 + vendor/golang.org/x/oauth2/go.sum | 12 + .../golang.org/x/oauth2/google/appengine.go | 38 + .../x/oauth2/google/appengine_gen1.go | 77 + .../x/oauth2/google/appengine_gen2_flex.go | 27 + vendor/golang.org/x/oauth2/google/default.go | 154 + vendor/golang.org/x/oauth2/google/doc.go | 40 + vendor/golang.org/x/oauth2/google/google.go | 209 + vendor/golang.org/x/oauth2/google/jwt.go | 74 + vendor/golang.org/x/oauth2/google/sdk.go | 201 + .../x/oauth2/internal/client_appengine.go | 13 + vendor/golang.org/x/oauth2/internal/doc.go | 6 + vendor/golang.org/x/oauth2/internal/oauth2.go | 37 + vendor/golang.org/x/oauth2/internal/token.go | 294 + .../golang.org/x/oauth2/internal/transport.go | 33 + vendor/golang.org/x/oauth2/jws/jws.go | 182 + vendor/golang.org/x/oauth2/jwt/jwt.go | 185 + vendor/golang.org/x/oauth2/oauth2.go | 381 + vendor/golang.org/x/oauth2/token.go | 178 + vendor/golang.org/x/oauth2/transport.go | 144 + vendor/golang.org/x/sys/AUTHORS | 3 + vendor/golang.org/x/sys/CONTRIBUTORS | 3 + vendor/golang.org/x/sys/LICENSE | 27 + vendor/golang.org/x/sys/PATENTS | 22 + vendor/golang.org/x/sys/unix/.gitignore | 2 + vendor/golang.org/x/sys/unix/README.md | 173 + 
.../golang.org/x/sys/unix/affinity_linux.go | 86 + vendor/golang.org/x/sys/unix/aliases.go | 14 + vendor/golang.org/x/sys/unix/asm_aix_ppc64.s | 17 + vendor/golang.org/x/sys/unix/asm_darwin_386.s | 29 + .../golang.org/x/sys/unix/asm_darwin_amd64.s | 29 + vendor/golang.org/x/sys/unix/asm_darwin_arm.s | 30 + .../golang.org/x/sys/unix/asm_darwin_arm64.s | 30 + .../x/sys/unix/asm_dragonfly_amd64.s | 29 + .../golang.org/x/sys/unix/asm_freebsd_386.s | 29 + .../golang.org/x/sys/unix/asm_freebsd_amd64.s | 29 + .../golang.org/x/sys/unix/asm_freebsd_arm.s | 29 + .../golang.org/x/sys/unix/asm_freebsd_arm64.s | 29 + vendor/golang.org/x/sys/unix/asm_linux_386.s | 65 + .../golang.org/x/sys/unix/asm_linux_amd64.s | 57 + vendor/golang.org/x/sys/unix/asm_linux_arm.s | 56 + .../golang.org/x/sys/unix/asm_linux_arm64.s | 52 + .../golang.org/x/sys/unix/asm_linux_mips64x.s | 56 + .../golang.org/x/sys/unix/asm_linux_mipsx.s | 54 + .../golang.org/x/sys/unix/asm_linux_ppc64x.s | 44 + .../golang.org/x/sys/unix/asm_linux_riscv64.s | 54 + .../golang.org/x/sys/unix/asm_linux_s390x.s | 56 + vendor/golang.org/x/sys/unix/asm_netbsd_386.s | 29 + .../golang.org/x/sys/unix/asm_netbsd_amd64.s | 29 + vendor/golang.org/x/sys/unix/asm_netbsd_arm.s | 29 + .../golang.org/x/sys/unix/asm_netbsd_arm64.s | 29 + .../golang.org/x/sys/unix/asm_openbsd_386.s | 29 + .../golang.org/x/sys/unix/asm_openbsd_amd64.s | 29 + .../golang.org/x/sys/unix/asm_openbsd_arm.s | 29 + .../golang.org/x/sys/unix/asm_openbsd_arm64.s | 29 + .../golang.org/x/sys/unix/asm_solaris_amd64.s | 17 + .../golang.org/x/sys/unix/bluetooth_linux.go | 36 + vendor/golang.org/x/sys/unix/cap_freebsd.go | 195 + vendor/golang.org/x/sys/unix/constants.go | 13 + vendor/golang.org/x/sys/unix/dev_aix_ppc.go | 27 + vendor/golang.org/x/sys/unix/dev_aix_ppc64.go | 29 + vendor/golang.org/x/sys/unix/dev_darwin.go | 24 + vendor/golang.org/x/sys/unix/dev_dragonfly.go | 30 + vendor/golang.org/x/sys/unix/dev_freebsd.go | 30 + 
vendor/golang.org/x/sys/unix/dev_linux.go | 42 + vendor/golang.org/x/sys/unix/dev_netbsd.go | 29 + vendor/golang.org/x/sys/unix/dev_openbsd.go | 29 + vendor/golang.org/x/sys/unix/dirent.go | 102 + vendor/golang.org/x/sys/unix/endian_big.go | 9 + vendor/golang.org/x/sys/unix/endian_little.go | 9 + vendor/golang.org/x/sys/unix/env_unix.go | 31 + .../x/sys/unix/errors_freebsd_386.go | 227 + .../x/sys/unix/errors_freebsd_amd64.go | 227 + .../x/sys/unix/errors_freebsd_arm.go | 226 + vendor/golang.org/x/sys/unix/fcntl.go | 32 + vendor/golang.org/x/sys/unix/fcntl_darwin.go | 18 + .../x/sys/unix/fcntl_linux_32bit.go | 13 + vendor/golang.org/x/sys/unix/fdset.go | 29 + vendor/golang.org/x/sys/unix/gccgo.go | 62 + vendor/golang.org/x/sys/unix/gccgo_c.c | 39 + .../x/sys/unix/gccgo_linux_amd64.go | 20 + vendor/golang.org/x/sys/unix/ioctl.go | 65 + vendor/golang.org/x/sys/unix/mkall.sh | 229 + vendor/golang.org/x/sys/unix/mkerrors.sh | 690 + vendor/golang.org/x/sys/unix/pagesize_unix.go | 15 + .../golang.org/x/sys/unix/pledge_openbsd.go | 163 + vendor/golang.org/x/sys/unix/race.go | 30 + vendor/golang.org/x/sys/unix/race0.go | 25 + .../x/sys/unix/readdirent_getdents.go | 12 + .../x/sys/unix/readdirent_getdirentries.go | 19 + .../x/sys/unix/sockcmsg_dragonfly.go | 16 + .../golang.org/x/sys/unix/sockcmsg_linux.go | 36 + vendor/golang.org/x/sys/unix/sockcmsg_unix.go | 92 + .../x/sys/unix/sockcmsg_unix_other.go | 38 + vendor/golang.org/x/sys/unix/str.go | 26 + vendor/golang.org/x/sys/unix/syscall.go | 53 + vendor/golang.org/x/sys/unix/syscall_aix.go | 536 + .../golang.org/x/sys/unix/syscall_aix_ppc.go | 54 + .../x/sys/unix/syscall_aix_ppc64.go | 85 + vendor/golang.org/x/sys/unix/syscall_bsd.go | 616 + .../x/sys/unix/syscall_darwin.1_12.go | 29 + .../x/sys/unix/syscall_darwin.1_13.go | 101 + .../golang.org/x/sys/unix/syscall_darwin.go | 682 + .../x/sys/unix/syscall_darwin_386.1_11.go | 9 + .../x/sys/unix/syscall_darwin_386.go | 68 + .../x/sys/unix/syscall_darwin_amd64.1_11.go | 9 + 
.../x/sys/unix/syscall_darwin_amd64.go | 68 + .../x/sys/unix/syscall_darwin_arm.1_11.go | 11 + .../x/sys/unix/syscall_darwin_arm.go | 68 + .../x/sys/unix/syscall_darwin_arm64.1_11.go | 11 + .../x/sys/unix/syscall_darwin_arm64.go | 70 + .../x/sys/unix/syscall_darwin_libSystem.go | 33 + .../x/sys/unix/syscall_dragonfly.go | 539 + .../x/sys/unix/syscall_dragonfly_amd64.go | 56 + .../golang.org/x/sys/unix/syscall_freebsd.go | 884 + .../x/sys/unix/syscall_freebsd_386.go | 56 + .../x/sys/unix/syscall_freebsd_amd64.go | 56 + .../x/sys/unix/syscall_freebsd_arm.go | 56 + .../x/sys/unix/syscall_freebsd_arm64.go | 56 + vendor/golang.org/x/sys/unix/syscall_linux.go | 1973 + .../x/sys/unix/syscall_linux_386.go | 390 + .../x/sys/unix/syscall_linux_amd64.go | 194 + .../x/sys/unix/syscall_linux_amd64_gc.go | 13 + .../x/sys/unix/syscall_linux_arm.go | 291 + .../x/sys/unix/syscall_linux_arm64.go | 227 + .../golang.org/x/sys/unix/syscall_linux_gc.go | 14 + .../x/sys/unix/syscall_linux_gc_386.go | 16 + .../x/sys/unix/syscall_linux_gccgo_386.go | 30 + .../x/sys/unix/syscall_linux_gccgo_arm.go | 20 + .../x/sys/unix/syscall_linux_mips64x.go | 226 + .../x/sys/unix/syscall_linux_mipsx.go | 238 + .../x/sys/unix/syscall_linux_ppc64x.go | 156 + .../x/sys/unix/syscall_linux_riscv64.go | 230 + .../x/sys/unix/syscall_linux_s390x.go | 342 + .../x/sys/unix/syscall_linux_sparc64.go | 151 + .../golang.org/x/sys/unix/syscall_netbsd.go | 630 + .../x/sys/unix/syscall_netbsd_386.go | 37 + .../x/sys/unix/syscall_netbsd_amd64.go | 37 + .../x/sys/unix/syscall_netbsd_arm.go | 37 + .../x/sys/unix/syscall_netbsd_arm64.go | 37 + .../golang.org/x/sys/unix/syscall_openbsd.go | 414 + .../x/sys/unix/syscall_openbsd_386.go | 41 + .../x/sys/unix/syscall_openbsd_amd64.go | 41 + .../x/sys/unix/syscall_openbsd_arm.go | 41 + .../x/sys/unix/syscall_openbsd_arm64.go | 41 + .../golang.org/x/sys/unix/syscall_solaris.go | 724 + .../x/sys/unix/syscall_solaris_amd64.go | 27 + vendor/golang.org/x/sys/unix/syscall_unix.go | 431 
+ .../golang.org/x/sys/unix/syscall_unix_gc.go | 15 + .../x/sys/unix/syscall_unix_gc_ppc64x.go | 24 + vendor/golang.org/x/sys/unix/timestruct.go | 82 + .../golang.org/x/sys/unix/unveil_openbsd.go | 42 + vendor/golang.org/x/sys/unix/xattr_bsd.go | 240 + .../golang.org/x/sys/unix/zerrors_aix_ppc.go | 1384 + .../x/sys/unix/zerrors_aix_ppc64.go | 1385 + .../x/sys/unix/zerrors_darwin_386.go | 1784 + .../x/sys/unix/zerrors_darwin_amd64.go | 1784 + .../x/sys/unix/zerrors_darwin_arm.go | 1784 + .../x/sys/unix/zerrors_darwin_arm64.go | 1784 + .../x/sys/unix/zerrors_dragonfly_amd64.go | 1651 + .../x/sys/unix/zerrors_freebsd_386.go | 1794 + .../x/sys/unix/zerrors_freebsd_amd64.go | 1795 + .../x/sys/unix/zerrors_freebsd_arm.go | 1803 + .../x/sys/unix/zerrors_freebsd_arm64.go | 1795 + .../x/sys/unix/zerrors_linux_386.go | 3176 + .../x/sys/unix/zerrors_linux_amd64.go | 3176 + .../x/sys/unix/zerrors_linux_arm.go | 3182 + .../x/sys/unix/zerrors_linux_arm64.go | 3169 + .../x/sys/unix/zerrors_linux_mips.go | 3183 + .../x/sys/unix/zerrors_linux_mips64.go | 3183 + .../x/sys/unix/zerrors_linux_mips64le.go | 3183 + .../x/sys/unix/zerrors_linux_mipsle.go | 3183 + .../x/sys/unix/zerrors_linux_ppc64.go | 3238 + .../x/sys/unix/zerrors_linux_ppc64le.go | 3238 + .../x/sys/unix/zerrors_linux_riscv64.go | 3163 + .../x/sys/unix/zerrors_linux_s390x.go | 3236 + .../x/sys/unix/zerrors_linux_sparc64.go | 3232 + .../x/sys/unix/zerrors_netbsd_386.go | 1773 + .../x/sys/unix/zerrors_netbsd_amd64.go | 1763 + .../x/sys/unix/zerrors_netbsd_arm.go | 1752 + .../x/sys/unix/zerrors_netbsd_arm64.go | 1763 + .../x/sys/unix/zerrors_openbsd_386.go | 1657 + .../x/sys/unix/zerrors_openbsd_amd64.go | 1767 + .../x/sys/unix/zerrors_openbsd_arm.go | 1659 + .../x/sys/unix/zerrors_openbsd_arm64.go | 1790 + .../x/sys/unix/zerrors_solaris_amd64.go | 1533 + .../x/sys/unix/zptrace_armnn_linux.go | 41 + .../x/sys/unix/zptrace_linux_arm64.go | 17 + .../x/sys/unix/zptrace_mipsnn_linux.go | 50 + 
.../x/sys/unix/zptrace_mipsnnle_linux.go | 50 + .../x/sys/unix/zptrace_x86_linux.go | 80 + .../golang.org/x/sys/unix/zsyscall_aix_ppc.go | 1484 + .../x/sys/unix/zsyscall_aix_ppc64.go | 1442 + .../x/sys/unix/zsyscall_aix_ppc64_gc.go | 1192 + .../x/sys/unix/zsyscall_aix_ppc64_gccgo.go | 1070 + .../x/sys/unix/zsyscall_darwin_386.1_11.go | 1811 + .../x/sys/unix/zsyscall_darwin_386.1_13.go | 41 + .../x/sys/unix/zsyscall_darwin_386.1_13.s | 12 + .../x/sys/unix/zsyscall_darwin_386.go | 2499 + .../x/sys/unix/zsyscall_darwin_386.s | 282 + .../x/sys/unix/zsyscall_darwin_amd64.1_11.go | 1811 + .../x/sys/unix/zsyscall_darwin_amd64.1_13.go | 41 + .../x/sys/unix/zsyscall_darwin_amd64.1_13.s | 12 + .../x/sys/unix/zsyscall_darwin_amd64.go | 2499 + .../x/sys/unix/zsyscall_darwin_amd64.s | 284 + .../x/sys/unix/zsyscall_darwin_arm.1_11.go | 1784 + .../x/sys/unix/zsyscall_darwin_arm.1_13.go | 41 + .../x/sys/unix/zsyscall_darwin_arm.1_13.s | 12 + .../x/sys/unix/zsyscall_darwin_arm.go | 2484 + .../x/sys/unix/zsyscall_darwin_arm.s | 280 + .../x/sys/unix/zsyscall_darwin_arm64.1_11.go | 1784 + .../x/sys/unix/zsyscall_darwin_arm64.1_13.go | 41 + .../x/sys/unix/zsyscall_darwin_arm64.1_13.s | 12 + .../x/sys/unix/zsyscall_darwin_arm64.go | 2484 + .../x/sys/unix/zsyscall_darwin_arm64.s | 280 + .../x/sys/unix/zsyscall_dragonfly_amd64.go | 1677 + .../x/sys/unix/zsyscall_freebsd_386.go | 2026 + .../x/sys/unix/zsyscall_freebsd_amd64.go | 2026 + .../x/sys/unix/zsyscall_freebsd_arm.go | 2026 + .../x/sys/unix/zsyscall_freebsd_arm64.go | 2026 + .../x/sys/unix/zsyscall_linux_386.go | 2300 + .../x/sys/unix/zsyscall_linux_amd64.go | 2467 + .../x/sys/unix/zsyscall_linux_arm.go | 2437 + .../x/sys/unix/zsyscall_linux_arm64.go | 2324 + .../x/sys/unix/zsyscall_linux_mips.go | 2480 + .../x/sys/unix/zsyscall_linux_mips64.go | 2451 + .../x/sys/unix/zsyscall_linux_mips64le.go | 2451 + .../x/sys/unix/zsyscall_linux_mipsle.go | 2480 + .../x/sys/unix/zsyscall_linux_ppc64.go | 2529 + 
.../x/sys/unix/zsyscall_linux_ppc64le.go | 2529 + .../x/sys/unix/zsyscall_linux_riscv64.go | 2304 + .../x/sys/unix/zsyscall_linux_s390x.go | 2299 + .../x/sys/unix/zsyscall_linux_sparc64.go | 2462 + .../x/sys/unix/zsyscall_netbsd_386.go | 1852 + .../x/sys/unix/zsyscall_netbsd_amd64.go | 1852 + .../x/sys/unix/zsyscall_netbsd_arm.go | 1852 + .../x/sys/unix/zsyscall_netbsd_arm64.go | 1852 + .../x/sys/unix/zsyscall_openbsd_386.go | 1693 + .../x/sys/unix/zsyscall_openbsd_amd64.go | 1693 + .../x/sys/unix/zsyscall_openbsd_arm.go | 1693 + .../x/sys/unix/zsyscall_openbsd_arm64.go | 1693 + .../x/sys/unix/zsyscall_solaris_amd64.go | 1954 + .../x/sys/unix/zsysctl_openbsd_386.go | 272 + .../x/sys/unix/zsysctl_openbsd_amd64.go | 270 + .../x/sys/unix/zsysctl_openbsd_arm.go | 272 + .../x/sys/unix/zsysctl_openbsd_arm64.go | 275 + .../x/sys/unix/zsysnum_darwin_386.go | 436 + .../x/sys/unix/zsysnum_darwin_amd64.go | 438 + .../x/sys/unix/zsysnum_darwin_arm.go | 436 + .../x/sys/unix/zsysnum_darwin_arm64.go | 436 + .../x/sys/unix/zsysnum_dragonfly_amd64.go | 315 + .../x/sys/unix/zsysnum_freebsd_386.go | 396 + .../x/sys/unix/zsysnum_freebsd_amd64.go | 396 + .../x/sys/unix/zsysnum_freebsd_arm.go | 396 + .../x/sys/unix/zsysnum_freebsd_arm64.go | 396 + .../x/sys/unix/zsysnum_linux_386.go | 434 + .../x/sys/unix/zsysnum_linux_amd64.go | 356 + .../x/sys/unix/zsysnum_linux_arm.go | 398 + .../x/sys/unix/zsysnum_linux_arm64.go | 300 + .../x/sys/unix/zsysnum_linux_mips.go | 419 + .../x/sys/unix/zsysnum_linux_mips64.go | 349 + .../x/sys/unix/zsysnum_linux_mips64le.go | 349 + .../x/sys/unix/zsysnum_linux_mipsle.go | 419 + .../x/sys/unix/zsysnum_linux_ppc64.go | 398 + .../x/sys/unix/zsysnum_linux_ppc64le.go | 398 + .../x/sys/unix/zsysnum_linux_riscv64.go | 300 + .../x/sys/unix/zsysnum_linux_s390x.go | 363 + .../x/sys/unix/zsysnum_linux_sparc64.go | 377 + .../x/sys/unix/zsysnum_netbsd_386.go | 274 + .../x/sys/unix/zsysnum_netbsd_amd64.go | 274 + .../x/sys/unix/zsysnum_netbsd_arm.go | 274 + 
.../x/sys/unix/zsysnum_netbsd_arm64.go | 274 + .../x/sys/unix/zsysnum_openbsd_386.go | 218 + .../x/sys/unix/zsysnum_openbsd_amd64.go | 218 + .../x/sys/unix/zsysnum_openbsd_arm.go | 218 + .../x/sys/unix/zsysnum_openbsd_arm64.go | 217 + .../golang.org/x/sys/unix/ztypes_aix_ppc.go | 352 + .../golang.org/x/sys/unix/ztypes_aix_ppc64.go | 356 + .../x/sys/unix/ztypes_darwin_386.go | 499 + .../x/sys/unix/ztypes_darwin_amd64.go | 509 + .../x/sys/unix/ztypes_darwin_arm.go | 500 + .../x/sys/unix/ztypes_darwin_arm64.go | 509 + .../x/sys/unix/ztypes_dragonfly_amd64.go | 469 + .../x/sys/unix/ztypes_freebsd_386.go | 700 + .../x/sys/unix/ztypes_freebsd_amd64.go | 706 + .../x/sys/unix/ztypes_freebsd_arm.go | 683 + .../x/sys/unix/ztypes_freebsd_arm64.go | 684 + .../golang.org/x/sys/unix/ztypes_linux_386.go | 2808 + .../x/sys/unix/ztypes_linux_amd64.go | 2823 + .../golang.org/x/sys/unix/ztypes_linux_arm.go | 2800 + .../x/sys/unix/ztypes_linux_arm64.go | 2802 + .../x/sys/unix/ztypes_linux_mips.go | 2806 + .../x/sys/unix/ztypes_linux_mips64.go | 2805 + .../x/sys/unix/ztypes_linux_mips64le.go | 2805 + .../x/sys/unix/ztypes_linux_mipsle.go | 2806 + .../x/sys/unix/ztypes_linux_ppc64.go | 2812 + .../x/sys/unix/ztypes_linux_ppc64le.go | 2812 + .../x/sys/unix/ztypes_linux_riscv64.go | 2830 + .../x/sys/unix/ztypes_linux_s390x.go | 2826 + .../x/sys/unix/ztypes_linux_sparc64.go | 2807 + .../x/sys/unix/ztypes_netbsd_386.go | 498 + .../x/sys/unix/ztypes_netbsd_amd64.go | 506 + .../x/sys/unix/ztypes_netbsd_arm.go | 503 + .../x/sys/unix/ztypes_netbsd_arm64.go | 506 + .../x/sys/unix/ztypes_openbsd_386.go | 571 + .../x/sys/unix/ztypes_openbsd_amd64.go | 571 + .../x/sys/unix/ztypes_openbsd_arm.go | 572 + .../x/sys/unix/ztypes_openbsd_arm64.go | 565 + .../x/sys/unix/ztypes_solaris_amd64.go | 442 + vendor/golang.org/x/sys/windows/aliases.go | 13 + .../golang.org/x/sys/windows/dll_windows.go | 386 + vendor/golang.org/x/sys/windows/empty.s | 8 + .../golang.org/x/sys/windows/env_windows.go | 61 + 
vendor/golang.org/x/sys/windows/eventlog.go | 20 + .../golang.org/x/sys/windows/exec_windows.go | 97 + .../x/sys/windows/memory_windows.go | 26 + vendor/golang.org/x/sys/windows/mkerrors.bash | 63 + .../x/sys/windows/mkknownfolderids.bash | 27 + vendor/golang.org/x/sys/windows/mksyscall.go | 9 + vendor/golang.org/x/sys/windows/race.go | 30 + vendor/golang.org/x/sys/windows/race0.go | 25 + .../x/sys/windows/security_windows.go | 1396 + vendor/golang.org/x/sys/windows/service.go | 229 + vendor/golang.org/x/sys/windows/str.go | 22 + vendor/golang.org/x/sys/windows/syscall.go | 74 + .../x/sys/windows/syscall_windows.go | 1453 + .../golang.org/x/sys/windows/types_windows.go | 1777 + .../x/sys/windows/types_windows_386.go | 22 + .../x/sys/windows/types_windows_amd64.go | 22 + .../x/sys/windows/types_windows_arm.go | 22 + .../x/sys/windows/zerrors_windows.go | 6853 ++ .../x/sys/windows/zknownfolderids_windows.go | 149 + .../x/sys/windows/zsyscall_windows.go | 4051 ++ vendor/golang.org/x/text/AUTHORS | 3 + vendor/golang.org/x/text/CONTRIBUTORS | 3 + vendor/golang.org/x/text/LICENSE | 27 + vendor/golang.org/x/text/PATENTS | 22 + .../x/text/secure/bidirule/bidirule.go | 336 + .../x/text/secure/bidirule/bidirule10.0.0.go | 11 + .../x/text/secure/bidirule/bidirule9.0.0.go | 14 + .../golang.org/x/text/transform/transform.go | 705 + vendor/golang.org/x/text/unicode/bidi/bidi.go | 198 + .../golang.org/x/text/unicode/bidi/bracket.go | 335 + vendor/golang.org/x/text/unicode/bidi/core.go | 1058 + vendor/golang.org/x/text/unicode/bidi/prop.go | 206 + .../x/text/unicode/bidi/tables10.0.0.go | 1815 + .../x/text/unicode/bidi/tables11.0.0.go | 1887 + .../x/text/unicode/bidi/tables9.0.0.go | 1781 + .../golang.org/x/text/unicode/bidi/trieval.go | 60 + .../x/text/unicode/norm/composition.go | 512 + .../x/text/unicode/norm/forminfo.go | 278 + .../golang.org/x/text/unicode/norm/input.go | 109 + vendor/golang.org/x/text/unicode/norm/iter.go | 458 + .../x/text/unicode/norm/normalize.go | 609 + 
.../x/text/unicode/norm/readwriter.go | 125 + .../x/text/unicode/norm/tables10.0.0.go | 7657 +++ .../x/text/unicode/norm/tables11.0.0.go | 7693 +++ .../x/text/unicode/norm/tables9.0.0.go | 7637 +++ .../x/text/unicode/norm/transform.go | 88 + vendor/golang.org/x/text/unicode/norm/trie.go | 54 + vendor/golang.org/x/time/AUTHORS | 3 + vendor/golang.org/x/time/CONTRIBUTORS | 3 + vendor/golang.org/x/time/LICENSE | 27 + vendor/golang.org/x/time/PATENTS | 22 + vendor/golang.org/x/time/rate/rate.go | 391 + vendor/golang.org/x/xerrors/LICENSE | 27 + vendor/golang.org/x/xerrors/PATENTS | 22 + vendor/golang.org/x/xerrors/README | 2 + vendor/golang.org/x/xerrors/adaptor.go | 193 + vendor/golang.org/x/xerrors/codereview.cfg | 1 + vendor/golang.org/x/xerrors/doc.go | 25 + vendor/golang.org/x/xerrors/errors.go | 33 + vendor/golang.org/x/xerrors/fmt.go | 109 + vendor/golang.org/x/xerrors/format.go | 34 + vendor/golang.org/x/xerrors/frame.go | 56 + vendor/golang.org/x/xerrors/go.mod | 3 + .../golang.org/x/xerrors/internal/internal.go | 8 + vendor/golang.org/x/xerrors/wrap.go | 106 + vendor/google.golang.org/api/AUTHORS | 11 + vendor/google.golang.org/api/CONTRIBUTORS | 56 + vendor/google.golang.org/api/LICENSE | 27 + .../google.golang.org/api/dns/v1/dns-api.json | 1880 + .../google.golang.org/api/dns/v1/dns-gen.go | 5352 ++ .../api/googleapi/googleapi.go | 408 + .../api/googleapi/transport/apikey.go | 38 + .../google.golang.org/api/googleapi/types.go | 202 + .../google.golang.org/api/internal/creds.go | 92 + .../api/internal/gensupport/buffer.go | 79 + .../api/internal/gensupport/doc.go | 10 + .../api/internal/gensupport/json.go | 211 + .../api/internal/gensupport/jsonfloat.go | 47 + .../api/internal/gensupport/media.go | 363 + .../api/internal/gensupport/params.go | 51 + .../api/internal/gensupport/resumable.go | 241 + .../api/internal/gensupport/send.go | 87 + vendor/google.golang.org/api/internal/pool.go | 51 + .../api/internal/service-account.json | 12 + 
.../api/internal/settings.go | 87 + .../internal/third_party/uritemplates/LICENSE | 27 + .../third_party/uritemplates/METADATA | 14 + .../third_party/uritemplates/uritemplates.go | 248 + .../third_party/uritemplates/utils.go | 17 + .../api/option/credentials_go19.go | 23 + .../api/option/credentials_notgo19.go | 22 + vendor/google.golang.org/api/option/option.go | 238 + .../api/transport/http/dial.go | 153 + .../api/transport/http/dial_appengine.go | 20 + .../http/internal/propagation/http.go | 86 + .../google.golang.org/appengine/.travis.yml | 20 + .../appengine/CONTRIBUTING.md | 90 + vendor/google.golang.org/appengine/LICENSE | 202 + vendor/google.golang.org/appengine/README.md | 100 + .../google.golang.org/appengine/appengine.go | 135 + .../appengine/appengine_vm.go | 20 + vendor/google.golang.org/appengine/errors.go | 46 + vendor/google.golang.org/appengine/go.mod | 10 + vendor/google.golang.org/appengine/go.sum | 22 + .../google.golang.org/appengine/identity.go | 142 + .../appengine/internal/api.go | 675 + .../appengine/internal/api_classic.go | 169 + .../appengine/internal/api_common.go | 123 + .../appengine/internal/app_id.go | 28 + .../app_identity/app_identity_service.pb.go | 611 + .../app_identity/app_identity_service.proto | 64 + .../appengine/internal/base/api_base.pb.go | 308 + .../appengine/internal/base/api_base.proto | 33 + .../internal/datastore/datastore_v3.pb.go | 4367 ++ .../internal/datastore/datastore_v3.proto | 551 + .../appengine/internal/identity.go | 55 + .../appengine/internal/identity_classic.go | 61 + .../appengine/internal/identity_flex.go | 11 + .../appengine/internal/identity_vm.go | 134 + .../appengine/internal/internal.go | 110 + .../appengine/internal/log/log_service.pb.go | 1313 + .../appengine/internal/log/log_service.proto | 150 + .../appengine/internal/main.go | 16 + .../appengine/internal/main_common.go | 7 + .../appengine/internal/main_vm.go | 69 + .../appengine/internal/metadata.go | 60 + 
.../internal/modules/modules_service.pb.go | 786 + .../internal/modules/modules_service.proto | 80 + .../appengine/internal/net.go | 56 + .../appengine/internal/regen.sh | 40 + .../internal/remote_api/remote_api.pb.go | 361 + .../internal/remote_api/remote_api.proto | 44 + .../appengine/internal/transaction.go | 115 + .../internal/urlfetch/urlfetch_service.pb.go | 527 + .../internal/urlfetch/urlfetch_service.proto | 64 + .../google.golang.org/appengine/namespace.go | 25 + vendor/google.golang.org/appengine/timeout.go | 20 + .../appengine/travis_install.sh | 18 + .../appengine/travis_test.sh | 12 + .../appengine/urlfetch/urlfetch.go | 210 + vendor/google.golang.org/genproto/LICENSE | 202 + .../googleapis/rpc/status/status.pb.go | 163 + vendor/google.golang.org/grpc/.travis.yml | 42 + vendor/google.golang.org/grpc/AUTHORS | 1 + .../google.golang.org/grpc/CODE-OF-CONDUCT.md | 3 + vendor/google.golang.org/grpc/CONTRIBUTING.md | 62 + vendor/google.golang.org/grpc/GOVERNANCE.md | 1 + vendor/google.golang.org/grpc/LICENSE | 202 + vendor/google.golang.org/grpc/MAINTAINERS.md | 27 + vendor/google.golang.org/grpc/Makefile | 60 + vendor/google.golang.org/grpc/README.md | 121 + vendor/google.golang.org/grpc/backoff.go | 58 + .../google.golang.org/grpc/backoff/backoff.go | 52 + vendor/google.golang.org/grpc/balancer.go | 391 + .../grpc/balancer/balancer.go | 373 + .../grpc/balancer/base/balancer.go | 191 + .../grpc/balancer/base/base.go | 64 + .../grpc/balancer/roundrobin/roundrobin.go | 83 + .../grpc/balancer_conn_wrappers.go | 254 + .../grpc/balancer_v1_wrapper.go | 334 + .../grpc_binarylog_v1/binarylog.pb.go | 900 + vendor/google.golang.org/grpc/call.go | 74 + vendor/google.golang.org/grpc/clientconn.go | 1539 + vendor/google.golang.org/grpc/codec.go | 50 + vendor/google.golang.org/grpc/codegen.sh | 17 + .../grpc/codes/code_string.go | 62 + vendor/google.golang.org/grpc/codes/codes.go | 198 + .../grpc/connectivity/connectivity.go | 73 + .../grpc/credentials/credentials.go | 
364 + .../grpc/credentials/internal/syscallconn.go | 61 + .../internal/syscallconn_appengine.go | 30 + .../grpc/credentials/tls13.go | 30 + vendor/google.golang.org/grpc/dialoptions.go | 589 + vendor/google.golang.org/grpc/doc.go | 24 + .../grpc/encoding/encoding.go | 122 + .../grpc/encoding/proto/proto.go | 110 + vendor/google.golang.org/grpc/go.mod | 16 + vendor/google.golang.org/grpc/go.sum | 53 + .../google.golang.org/grpc/grpclog/grpclog.go | 126 + .../google.golang.org/grpc/grpclog/logger.go | 85 + .../grpc/grpclog/loggerv2.go | 195 + vendor/google.golang.org/grpc/install_gae.sh | 6 + vendor/google.golang.org/grpc/interceptor.go | 77 + .../grpc/internal/backoff/backoff.go | 73 + .../grpc/internal/balancerload/load.go | 46 + .../grpc/internal/binarylog/binarylog.go | 167 + .../internal/binarylog/binarylog_testutil.go | 42 + .../grpc/internal/binarylog/env_config.go | 210 + .../grpc/internal/binarylog/method_logger.go | 423 + .../grpc/internal/binarylog/regenerate.sh | 33 + .../grpc/internal/binarylog/sink.go | 162 + .../grpc/internal/binarylog/util.go | 41 + .../grpc/internal/buffer/unbounded.go | 78 + .../grpc/internal/channelz/funcs.go | 727 + .../grpc/internal/channelz/types.go | 702 + .../grpc/internal/channelz/types_linux.go | 53 + .../grpc/internal/channelz/types_nonlinux.go | 44 + .../grpc/internal/channelz/util_linux.go | 39 + .../grpc/internal/channelz/util_nonlinux.go | 26 + .../grpc/internal/envconfig/envconfig.go | 35 + .../grpc/internal/grpcrand/grpcrand.go | 56 + .../grpc/internal/grpcsync/event.go | 61 + .../grpc/internal/internal.go | 74 + .../internal/resolver/dns/dns_resolver.go | 460 + .../resolver/passthrough/passthrough.go | 57 + .../grpc/internal/syscall/syscall_linux.go | 114 + .../grpc/internal/syscall/syscall_nonlinux.go | 73 + .../grpc/internal/transport/bdp_estimator.go | 141 + .../grpc/internal/transport/controlbuf.go | 926 + .../grpc/internal/transport/defaults.go | 49 + .../grpc/internal/transport/flowcontrol.go | 217 + 
.../grpc/internal/transport/handler_server.go | 431 + .../grpc/internal/transport/http2_client.go | 1440 + .../grpc/internal/transport/http2_server.go | 1231 + .../grpc/internal/transport/http_util.go | 677 + .../grpc/internal/transport/log.go | 44 + .../grpc/internal/transport/transport.go | 807 + .../grpc/keepalive/keepalive.go | 85 + .../grpc/metadata/metadata.go | 209 + .../grpc/naming/dns_resolver.go | 293 + .../google.golang.org/grpc/naming/naming.go | 68 + vendor/google.golang.org/grpc/peer/peer.go | 51 + .../google.golang.org/grpc/picker_wrapper.go | 197 + vendor/google.golang.org/grpc/pickfirst.go | 118 + vendor/google.golang.org/grpc/preloader.go | 64 + vendor/google.golang.org/grpc/proxy.go | 152 + .../grpc/resolver/dns/dns_resolver.go | 36 + .../grpc/resolver/passthrough/passthrough.go | 26 + .../grpc/resolver/resolver.go | 231 + .../grpc/resolver_conn_wrapper.go | 265 + vendor/google.golang.org/grpc/rpc_util.go | 886 + vendor/google.golang.org/grpc/server.go | 1522 + .../google.golang.org/grpc/service_config.go | 434 + .../grpc/serviceconfig/serviceconfig.go | 41 + .../google.golang.org/grpc/stats/handlers.go | 63 + vendor/google.golang.org/grpc/stats/stats.go | 300 + .../google.golang.org/grpc/status/status.go | 228 + vendor/google.golang.org/grpc/stream.go | 1529 + vendor/google.golang.org/grpc/tap/tap.go | 51 + vendor/google.golang.org/grpc/trace.go | 126 + vendor/google.golang.org/grpc/version.go | 22 + vendor/google.golang.org/grpc/vet.sh | 142 + .../gopkg.in/DataDog/dd-trace-go.v1/LICENSE | 24 + .../dd-trace-go.v1/LICENSE-3rdparty.csv | 2 + .../DataDog/dd-trace-go.v1/ddtrace/Gopkg.toml | 9 + .../DataDog/dd-trace-go.v1/ddtrace/ddtrace.go | 132 + .../dd-trace-go.v1/ddtrace/ext/app_types.go | 78 + .../dd-trace-go.v1/ddtrace/ext/cassandra.go | 26 + .../DataDog/dd-trace-go.v1/ddtrace/ext/db.go | 21 + .../dd-trace-go.v1/ddtrace/ext/peer.go | 19 + .../dd-trace-go.v1/ddtrace/ext/priority.go | 27 + .../dd-trace-go.v1/ddtrace/ext/system.go | 12 + 
.../dd-trace-go.v1/ddtrace/ext/tags.go | 86 + .../ddtrace/internal/globaltracer.go | 103 + .../ddtrace/opentracer/option.go | 34 + .../dd-trace-go.v1/ddtrace/opentracer/span.go | 88 + .../ddtrace/opentracer/tracer.go | 86 + .../dd-trace-go.v1/ddtrace/tracer/context.go | 47 + .../dd-trace-go.v1/ddtrace/tracer/doc.go | 55 + .../dd-trace-go.v1/ddtrace/tracer/metrics.go | 103 + .../dd-trace-go.v1/ddtrace/tracer/option.go | 306 + .../dd-trace-go.v1/ddtrace/tracer/payload.go | 144 + .../ddtrace/tracer/propagator.go | 57 + .../dd-trace-go.v1/ddtrace/tracer/rand.go | 56 + .../dd-trace-go.v1/ddtrace/tracer/sampler.go | 149 + .../dd-trace-go.v1/ddtrace/tracer/span.go | 346 + .../ddtrace/tracer/span_msgp.go | 453 + .../ddtrace/tracer/spancontext.go | 247 + .../dd-trace-go.v1/ddtrace/tracer/textmap.go | 362 + .../dd-trace-go.v1/ddtrace/tracer/time.go | 15 + .../ddtrace/tracer/time_windows.go | 40 + .../dd-trace-go.v1/ddtrace/tracer/tracer.go | 373 + .../ddtrace/tracer/tracer_go11.go | 21 + .../ddtrace/tracer/tracer_nongo11.go | 12 + .../ddtrace/tracer/transport.go | 187 + .../dd-trace-go.v1/ddtrace/tracer/util.go | 55 + .../internal/globalconfig/globalconfig.go | 38 + .../dd-trace-go.v1/internal/log/log.go | 168 + .../internal/version/version.go | 11 + vendor/gopkg.in/inf.v0/LICENSE | 28 + vendor/gopkg.in/inf.v0/dec.go | 615 + vendor/gopkg.in/inf.v0/rounder.go | 145 + vendor/gopkg.in/yaml.v2/.travis.yml | 12 + vendor/gopkg.in/yaml.v2/LICENSE | 201 + vendor/gopkg.in/yaml.v2/LICENSE.libyaml | 31 + vendor/gopkg.in/yaml.v2/NOTICE | 13 + vendor/gopkg.in/yaml.v2/README.md | 133 + vendor/gopkg.in/yaml.v2/apic.go | 739 + vendor/gopkg.in/yaml.v2/decode.go | 775 + vendor/gopkg.in/yaml.v2/emitterc.go | 1685 + vendor/gopkg.in/yaml.v2/encode.go | 390 + vendor/gopkg.in/yaml.v2/go.mod | 5 + vendor/gopkg.in/yaml.v2/parserc.go | 1095 + vendor/gopkg.in/yaml.v2/readerc.go | 412 + vendor/gopkg.in/yaml.v2/resolve.go | 258 + vendor/gopkg.in/yaml.v2/scannerc.go | 2696 + 
vendor/gopkg.in/yaml.v2/sorter.go | 113 + vendor/gopkg.in/yaml.v2/writerc.go | 26 + vendor/gopkg.in/yaml.v2/yaml.go | 466 + vendor/gopkg.in/yaml.v2/yamlh.go | 738 + vendor/gopkg.in/yaml.v2/yamlprivateh.go | 173 + vendor/k8s.io/api/LICENSE | 202 + .../api/admissionregistration/v1beta1/doc.go | 26 + .../v1beta1/generated.pb.go | 2979 + .../v1beta1/generated.proto | 485 + .../admissionregistration/v1beta1/register.go | 53 + .../admissionregistration/v1beta1/types.go | 557 + .../v1beta1/types_swagger_doc_generated.go | 151 + .../v1beta1/zz_generated.deepcopy.go | 396 + vendor/k8s.io/api/apps/v1/doc.go | 21 + vendor/k8s.io/api/apps/v1/generated.pb.go | 6924 ++ vendor/k8s.io/api/apps/v1/generated.proto | 701 + vendor/k8s.io/api/apps/v1/register.go | 60 + vendor/k8s.io/api/apps/v1/types.go | 826 + .../apps/v1/types_swagger_doc_generated.go | 365 + .../api/apps/v1/zz_generated.deepcopy.go | 772 + vendor/k8s.io/api/apps/v1beta1/doc.go | 21 + .../k8s.io/api/apps/v1beta1/generated.pb.go | 5275 ++ .../k8s.io/api/apps/v1beta1/generated.proto | 484 + vendor/k8s.io/api/apps/v1beta1/register.go | 58 + vendor/k8s.io/api/apps/v1beta1/types.go | 567 + .../v1beta1/types_swagger_doc_generated.go | 273 + .../api/apps/v1beta1/zz_generated.deepcopy.go | 594 + vendor/k8s.io/api/apps/v1beta2/doc.go | 21 + .../k8s.io/api/apps/v1beta2/generated.pb.go | 7567 +++ .../k8s.io/api/apps/v1beta2/generated.proto | 752 + vendor/k8s.io/api/apps/v1beta2/register.go | 61 + vendor/k8s.io/api/apps/v1beta2/types.go | 876 + .../v1beta2/types_swagger_doc_generated.go | 396 + .../api/apps/v1beta2/zz_generated.deepcopy.go | 839 + .../api/auditregistration/v1alpha1/doc.go | 23 + .../v1alpha1/generated.pb.go | 1715 + .../v1alpha1/generated.proto | 162 + .../auditregistration/v1alpha1/register.go | 56 + .../api/auditregistration/v1alpha1/types.go | 198 + .../v1alpha1/types_swagger_doc_generated.go | 111 + .../v1alpha1/zz_generated.deepcopy.go | 229 + vendor/k8s.io/api/authentication/v1/doc.go | 22 + 
.../api/authentication/v1/generated.pb.go | 2233 + .../api/authentication/v1/generated.proto | 182 + .../k8s.io/api/authentication/v1/register.go | 52 + vendor/k8s.io/api/authentication/v1/types.go | 189 + .../v1/types_swagger_doc_generated.go | 115 + .../v1/zz_generated.deepcopy.go | 244 + .../k8s.io/api/authentication/v1beta1/doc.go | 22 + .../authentication/v1beta1/generated.pb.go | 1388 + .../authentication/v1beta1/generated.proto | 118 + .../api/authentication/v1beta1/register.go | 51 + .../api/authentication/v1beta1/types.go | 110 + .../v1beta1/types_swagger_doc_generated.go | 74 + .../v1beta1/zz_generated.deepcopy.go | 152 + vendor/k8s.io/api/authorization/v1/doc.go | 23 + .../api/authorization/v1/generated.pb.go | 3511 + .../api/authorization/v1/generated.proto | 272 + .../k8s.io/api/authorization/v1/register.go | 55 + vendor/k8s.io/api/authorization/v1/types.go | 268 + .../v1/types_swagger_doc_generated.go | 173 + .../authorization/v1/zz_generated.deepcopy.go | 385 + .../k8s.io/api/authorization/v1beta1/doc.go | 23 + .../api/authorization/v1beta1/generated.pb.go | 3511 + .../api/authorization/v1beta1/generated.proto | 272 + .../api/authorization/v1beta1/register.go | 55 + .../k8s.io/api/authorization/v1beta1/types.go | 268 + .../v1beta1/types_swagger_doc_generated.go | 173 + .../v1beta1/zz_generated.deepcopy.go | 385 + vendor/k8s.io/api/autoscaling/v1/doc.go | 21 + .../k8s.io/api/autoscaling/v1/generated.pb.go | 4691 ++ .../k8s.io/api/autoscaling/v1/generated.proto | 415 + vendor/k8s.io/api/autoscaling/v1/register.go | 53 + vendor/k8s.io/api/autoscaling/v1/types.go | 428 + .../v1/types_swagger_doc_generated.go | 250 + .../autoscaling/v1/zz_generated.deepcopy.go | 515 + vendor/k8s.io/api/autoscaling/v2beta1/doc.go | 21 + .../api/autoscaling/v2beta1/generated.pb.go | 4307 ++ .../api/autoscaling/v2beta1/generated.proto | 397 + .../api/autoscaling/v2beta1/register.go | 52 + .../k8s.io/api/autoscaling/v2beta1/types.go | 405 + 
.../v2beta1/types_swagger_doc_generated.go | 221 + .../v2beta1/zz_generated.deepcopy.go | 466 + vendor/k8s.io/api/autoscaling/v2beta2/doc.go | 21 + .../api/autoscaling/v2beta2/generated.pb.go | 4419 ++ .../api/autoscaling/v2beta2/generated.proto | 369 + .../api/autoscaling/v2beta2/register.go | 50 + .../k8s.io/api/autoscaling/v2beta2/types.go | 393 + .../v2beta2/types_swagger_doc_generated.go | 240 + .../v2beta2/zz_generated.deepcopy.go | 487 + vendor/k8s.io/api/batch/v1/doc.go | 21 + vendor/k8s.io/api/batch/v1/generated.pb.go | 1627 + vendor/k8s.io/api/batch/v1/generated.proto | 184 + vendor/k8s.io/api/batch/v1/register.go | 52 + vendor/k8s.io/api/batch/v1/types.go | 193 + .../batch/v1/types_swagger_doc_generated.go | 95 + .../api/batch/v1/zz_generated.deepcopy.go | 188 + vendor/k8s.io/api/batch/v1beta1/doc.go | 21 + .../k8s.io/api/batch/v1beta1/generated.pb.go | 1490 + .../k8s.io/api/batch/v1beta1/generated.proto | 137 + vendor/k8s.io/api/batch/v1beta1/register.go | 53 + vendor/k8s.io/api/batch/v1beta1/types.go | 158 + .../v1beta1/types_swagger_doc_generated.go | 96 + .../batch/v1beta1/zz_generated.deepcopy.go | 194 + vendor/k8s.io/api/batch/v2alpha1/doc.go | 21 + .../k8s.io/api/batch/v2alpha1/generated.pb.go | 1490 + .../k8s.io/api/batch/v2alpha1/generated.proto | 135 + vendor/k8s.io/api/batch/v2alpha1/register.go | 53 + vendor/k8s.io/api/batch/v2alpha1/types.go | 156 + .../v2alpha1/types_swagger_doc_generated.go | 96 + .../batch/v2alpha1/zz_generated.deepcopy.go | 194 + vendor/k8s.io/api/certificates/v1beta1/doc.go | 23 + .../api/certificates/v1beta1/generated.pb.go | 1676 + .../api/certificates/v1beta1/generated.proto | 121 + .../api/certificates/v1beta1/register.go | 59 + .../k8s.io/api/certificates/v1beta1/types.go | 155 + .../v1beta1/types_swagger_doc_generated.go | 74 + .../v1beta1/zz_generated.deepcopy.go | 197 + vendor/k8s.io/api/coordination/v1/doc.go | 23 + .../api/coordination/v1/generated.pb.go | 864 + .../api/coordination/v1/generated.proto | 80 + 
vendor/k8s.io/api/coordination/v1/register.go | 53 + vendor/k8s.io/api/coordination/v1/types.go | 74 + .../v1/types_swagger_doc_generated.go | 63 + .../coordination/v1/zz_generated.deepcopy.go | 124 + vendor/k8s.io/api/coordination/v1beta1/doc.go | 23 + .../api/coordination/v1beta1/generated.pb.go | 864 + .../api/coordination/v1beta1/generated.proto | 80 + .../api/coordination/v1beta1/register.go | 53 + .../k8s.io/api/coordination/v1beta1/types.go | 74 + .../v1beta1/types_swagger_doc_generated.go | 63 + .../v1beta1/zz_generated.deepcopy.go | 124 + .../api/core/v1/annotation_key_constants.go | 106 + vendor/k8s.io/api/core/v1/doc.go | 22 + vendor/k8s.io/api/core/v1/generated.pb.go | 53360 ++++++++++++++++ vendor/k8s.io/api/core/v1/generated.proto | 4885 ++ vendor/k8s.io/api/core/v1/objectreference.go | 33 + vendor/k8s.io/api/core/v1/register.go | 99 + vendor/k8s.io/api/core/v1/resource.go | 56 + vendor/k8s.io/api/core/v1/taint.go | 33 + vendor/k8s.io/api/core/v1/toleration.go | 56 + vendor/k8s.io/api/core/v1/types.go | 5464 ++ .../core/v1/types_swagger_doc_generated.go | 2376 + .../k8s.io/api/core/v1/well_known_labels.go | 36 + .../api/core/v1/zz_generated.deepcopy.go | 5519 ++ vendor/k8s.io/api/events/v1beta1/doc.go | 23 + .../k8s.io/api/events/v1beta1/generated.pb.go | 1287 + .../k8s.io/api/events/v1beta1/generated.proto | 122 + vendor/k8s.io/api/events/v1beta1/register.go | 53 + vendor/k8s.io/api/events/v1beta1/types.go | 123 + .../v1beta1/types_swagger_doc_generated.go | 73 + .../events/v1beta1/zz_generated.deepcopy.go | 117 + vendor/k8s.io/api/extensions/v1beta1/doc.go | 21 + .../api/extensions/v1beta1/generated.pb.go | 12714 ++++ .../api/extensions/v1beta1/generated.proto | 1194 + .../k8s.io/api/extensions/v1beta1/register.go | 66 + vendor/k8s.io/api/extensions/v1beta1/types.go | 1398 + .../v1beta1/types_swagger_doc_generated.go | 661 + .../v1beta1/zz_generated.deepcopy.go | 1497 + vendor/k8s.io/api/networking/v1/doc.go | 22 + 
.../k8s.io/api/networking/v1/generated.pb.go | 1849 + .../k8s.io/api/networking/v1/generated.proto | 195 + vendor/k8s.io/api/networking/v1/register.go | 53 + vendor/k8s.io/api/networking/v1/types.go | 203 + .../v1/types_swagger_doc_generated.go | 113 + .../networking/v1/zz_generated.deepcopy.go | 262 + vendor/k8s.io/api/networking/v1beta1/doc.go | 22 + .../api/networking/v1beta1/generated.pb.go | 1953 + .../api/networking/v1beta1/generated.proto | 186 + .../k8s.io/api/networking/v1beta1/register.go | 56 + vendor/k8s.io/api/networking/v1beta1/types.go | 192 + .../v1beta1/types_swagger_doc_generated.go | 127 + .../v1beta1/zz_generated.deepcopy.go | 252 + vendor/k8s.io/api/node/v1alpha1/doc.go | 23 + .../k8s.io/api/node/v1alpha1/generated.pb.go | 696 + .../k8s.io/api/node/v1alpha1/generated.proto | 76 + vendor/k8s.io/api/node/v1alpha1/register.go | 52 + vendor/k8s.io/api/node/v1alpha1/types.go | 75 + .../v1alpha1/types_swagger_doc_generated.go | 59 + .../node/v1alpha1/zz_generated.deepcopy.go | 101 + vendor/k8s.io/api/node/v1beta1/doc.go | 23 + .../k8s.io/api/node/v1beta1/generated.pb.go | 564 + .../k8s.io/api/node/v1beta1/generated.proto | 66 + vendor/k8s.io/api/node/v1beta1/register.go | 52 + vendor/k8s.io/api/node/v1beta1/types.go | 65 + .../v1beta1/types_swagger_doc_generated.go | 50 + .../api/node/v1beta1/zz_generated.deepcopy.go | 84 + vendor/k8s.io/api/policy/v1beta1/doc.go | 24 + .../k8s.io/api/policy/v1beta1/generated.pb.go | 4742 ++ .../k8s.io/api/policy/v1beta1/generated.proto | 400 + vendor/k8s.io/api/policy/v1beta1/register.go | 56 + vendor/k8s.io/api/policy/v1beta1/types.go | 489 + .../v1beta1/types_swagger_doc_generated.go | 243 + .../policy/v1beta1/zz_generated.deepcopy.go | 540 + vendor/k8s.io/api/rbac/v1/doc.go | 23 + vendor/k8s.io/api/rbac/v1/generated.pb.go | 2729 + vendor/k8s.io/api/rbac/v1/generated.proto | 199 + vendor/k8s.io/api/rbac/v1/register.go | 58 + vendor/k8s.io/api/rbac/v1/types.go | 237 + .../rbac/v1/types_swagger_doc_generated.go | 
158 + .../api/rbac/v1/zz_generated.deepcopy.go | 389 + vendor/k8s.io/api/rbac/v1alpha1/doc.go | 23 + .../k8s.io/api/rbac/v1alpha1/generated.pb.go | 2730 + .../k8s.io/api/rbac/v1alpha1/generated.proto | 201 + vendor/k8s.io/api/rbac/v1alpha1/register.go | 58 + vendor/k8s.io/api/rbac/v1alpha1/types.go | 239 + .../v1alpha1/types_swagger_doc_generated.go | 158 + .../rbac/v1alpha1/zz_generated.deepcopy.go | 389 + vendor/k8s.io/api/rbac/v1beta1/doc.go | 23 + .../k8s.io/api/rbac/v1beta1/generated.pb.go | 2729 + .../k8s.io/api/rbac/v1beta1/generated.proto | 200 + vendor/k8s.io/api/rbac/v1beta1/register.go | 58 + vendor/k8s.io/api/rbac/v1beta1/types.go | 237 + .../v1beta1/types_swagger_doc_generated.go | 158 + .../api/rbac/v1beta1/zz_generated.deepcopy.go | 389 + vendor/k8s.io/api/scheduling/v1/doc.go | 23 + .../k8s.io/api/scheduling/v1/generated.pb.go | 667 + .../k8s.io/api/scheduling/v1/generated.proto | 75 + vendor/k8s.io/api/scheduling/v1/register.go | 55 + vendor/k8s.io/api/scheduling/v1/types.go | 74 + .../v1/types_swagger_doc_generated.go | 53 + .../scheduling/v1/zz_generated.deepcopy.go | 90 + vendor/k8s.io/api/scheduling/v1alpha1/doc.go | 23 + .../api/scheduling/v1alpha1/generated.pb.go | 667 + .../api/scheduling/v1alpha1/generated.proto | 76 + .../api/scheduling/v1alpha1/register.go | 52 + .../k8s.io/api/scheduling/v1alpha1/types.go | 75 + .../v1alpha1/types_swagger_doc_generated.go | 53 + .../v1alpha1/zz_generated.deepcopy.go | 90 + vendor/k8s.io/api/scheduling/v1beta1/doc.go | 23 + .../api/scheduling/v1beta1/generated.pb.go | 667 + .../api/scheduling/v1beta1/generated.proto | 76 + .../k8s.io/api/scheduling/v1beta1/register.go | 52 + vendor/k8s.io/api/scheduling/v1beta1/types.go | 75 + .../v1beta1/types_swagger_doc_generated.go | 53 + .../v1beta1/zz_generated.deepcopy.go | 90 + vendor/k8s.io/api/settings/v1alpha1/doc.go | 23 + .../api/settings/v1alpha1/generated.pb.go | 910 + .../api/settings/v1alpha1/generated.proto | 75 + 
.../k8s.io/api/settings/v1alpha1/register.go | 52 + vendor/k8s.io/api/settings/v1alpha1/types.go | 70 + .../v1alpha1/types_swagger_doc_generated.go | 61 + .../v1alpha1/zz_generated.deepcopy.go | 131 + vendor/k8s.io/api/storage/v1/doc.go | 22 + vendor/k8s.io/api/storage/v1/generated.pb.go | 2296 + vendor/k8s.io/api/storage/v1/generated.proto | 195 + vendor/k8s.io/api/storage/v1/register.go | 56 + vendor/k8s.io/api/storage/v1/types.go | 218 + .../storage/v1/types_swagger_doc_generated.go | 119 + .../api/storage/v1/zz_generated.deepcopy.go | 273 + vendor/k8s.io/api/storage/v1alpha1/doc.go | 22 + .../api/storage/v1alpha1/generated.pb.go | 1556 + .../api/storage/v1alpha1/generated.proto | 136 + .../k8s.io/api/storage/v1alpha1/register.go | 50 + vendor/k8s.io/api/storage/v1alpha1/types.go | 136 + .../v1alpha1/types_swagger_doc_generated.go | 93 + .../storage/v1alpha1/zz_generated.deepcopy.go | 180 + vendor/k8s.io/api/storage/v1beta1/doc.go | 22 + .../api/storage/v1beta1/generated.pb.go | 3522 + .../api/storage/v1beta1/generated.proto | 332 + vendor/k8s.io/api/storage/v1beta1/register.go | 62 + vendor/k8s.io/api/storage/v1beta1/types.go | 375 + .../v1beta1/types_swagger_doc_generated.go | 189 + .../storage/v1beta1/zz_generated.deepcopy.go | 463 + vendor/k8s.io/apimachinery/LICENSE | 202 + .../k8s.io/apimachinery/pkg/api/errors/OWNERS | 26 + .../k8s.io/apimachinery/pkg/api/errors/doc.go | 18 + .../apimachinery/pkg/api/errors/errors.go | 666 + .../k8s.io/apimachinery/pkg/api/meta/OWNERS | 27 + .../k8s.io/apimachinery/pkg/api/meta/doc.go | 19 + .../apimachinery/pkg/api/meta/errors.go | 121 + .../pkg/api/meta/firsthit_restmapper.go | 97 + .../k8s.io/apimachinery/pkg/api/meta/help.go | 264 + .../apimachinery/pkg/api/meta/interfaces.go | 134 + .../k8s.io/apimachinery/pkg/api/meta/lazy.go | 104 + .../k8s.io/apimachinery/pkg/api/meta/meta.go | 649 + .../pkg/api/meta/multirestmapper.go | 210 + .../apimachinery/pkg/api/meta/priority.go | 222 + 
.../apimachinery/pkg/api/meta/restmapper.go | 518 + .../apimachinery/pkg/api/resource/OWNERS | 18 + .../apimachinery/pkg/api/resource/amount.go | 299 + .../pkg/api/resource/generated.pb.go | 75 + .../pkg/api/resource/generated.proto | 88 + .../apimachinery/pkg/api/resource/math.go | 314 + .../apimachinery/pkg/api/resource/quantity.go | 744 + .../pkg/api/resource/quantity_proto.go | 284 + .../pkg/api/resource/scale_int.go | 95 + .../apimachinery/pkg/api/resource/suffix.go | 198 + .../pkg/api/resource/zz_generated.deepcopy.go | 27 + .../apis/meta/internalversion/conversion.go | 52 + .../pkg/apis/meta/internalversion/doc.go | 20 + .../pkg/apis/meta/internalversion/register.go | 113 + .../pkg/apis/meta/internalversion/types.go | 76 + .../zz_generated.conversion.go | 143 + .../internalversion/zz_generated.deepcopy.go | 96 + .../apimachinery/pkg/apis/meta/v1/OWNERS | 33 + .../pkg/apis/meta/v1/controller_ref.go | 54 + .../pkg/apis/meta/v1/conversion.go | 329 + .../apimachinery/pkg/apis/meta/v1/deepcopy.go | 46 + .../apimachinery/pkg/apis/meta/v1/doc.go | 23 + .../apimachinery/pkg/apis/meta/v1/duration.go | 60 + .../pkg/apis/meta/v1/generated.pb.go | 9707 +++ .../pkg/apis/meta/v1/generated.proto | 1045 + .../pkg/apis/meta/v1/group_version.go | 148 + .../apimachinery/pkg/apis/meta/v1/helpers.go | 267 + .../apimachinery/pkg/apis/meta/v1/labels.go | 55 + .../apimachinery/pkg/apis/meta/v1/meta.go | 182 + .../pkg/apis/meta/v1/micro_time.go | 196 + .../pkg/apis/meta/v1/micro_time_proto.go | 72 + .../apimachinery/pkg/apis/meta/v1/register.go | 116 + .../apimachinery/pkg/apis/meta/v1/time.go | 187 + .../pkg/apis/meta/v1/time_proto.go | 92 + .../apimachinery/pkg/apis/meta/v1/types.go | 1320 + .../meta/v1/types_swagger_doc_generated.go | 461 + .../pkg/apis/meta/v1/unstructured/helpers.go | 470 + .../apis/meta/v1/unstructured/unstructured.go | 521 + .../meta/v1/unstructured/unstructured_list.go | 210 + .../v1/unstructured/zz_generated.deepcopy.go | 55 + 
.../apimachinery/pkg/apis/meta/v1/watch.go | 89 + .../pkg/apis/meta/v1/zz_generated.deepcopy.go | 1222 + .../pkg/apis/meta/v1/zz_generated.defaults.go | 32 + .../pkg/apis/meta/v1beta1/conversion.go | 27 + .../pkg/apis/meta/v1beta1/deepcopy.go | 17 + .../apimachinery/pkg/apis/meta/v1beta1/doc.go | 23 + .../pkg/apis/meta/v1beta1/generated.pb.go | 398 + .../pkg/apis/meta/v1beta1/generated.proto | 42 + .../pkg/apis/meta/v1beta1/register.go | 61 + .../pkg/apis/meta/v1beta1/types.go | 84 + .../v1beta1/types_swagger_doc_generated.go | 40 + .../meta/v1beta1/zz_generated.deepcopy.go | 59 + .../meta/v1beta1/zz_generated.defaults.go | 32 + .../apimachinery/pkg/conversion/converter.go | 898 + .../apimachinery/pkg/conversion/deep_equal.go | 36 + .../k8s.io/apimachinery/pkg/conversion/doc.go | 24 + .../apimachinery/pkg/conversion/helper.go | 39 + .../pkg/conversion/queryparams/convert.go | 194 + .../pkg/conversion/queryparams/doc.go | 19 + vendor/k8s.io/apimachinery/pkg/fields/doc.go | 19 + .../k8s.io/apimachinery/pkg/fields/fields.go | 62 + .../apimachinery/pkg/fields/requirements.go | 30 + .../apimachinery/pkg/fields/selector.go | 476 + vendor/k8s.io/apimachinery/pkg/labels/doc.go | 19 + .../k8s.io/apimachinery/pkg/labels/labels.go | 181 + .../apimachinery/pkg/labels/selector.go | 891 + .../pkg/labels/zz_generated.deepcopy.go | 42 + .../k8s.io/apimachinery/pkg/runtime/codec.go | 332 + .../apimachinery/pkg/runtime/codec_check.go | 48 + .../apimachinery/pkg/runtime/conversion.go | 113 + .../apimachinery/pkg/runtime/converter.go | 805 + vendor/k8s.io/apimachinery/pkg/runtime/doc.go | 51 + .../apimachinery/pkg/runtime/embedded.go | 142 + .../k8s.io/apimachinery/pkg/runtime/error.go | 151 + .../apimachinery/pkg/runtime/extension.go | 51 + .../apimachinery/pkg/runtime/generated.pb.go | 753 + .../apimachinery/pkg/runtime/generated.proto | 127 + .../k8s.io/apimachinery/pkg/runtime/helper.go | 259 + .../apimachinery/pkg/runtime/interfaces.go | 278 + 
.../k8s.io/apimachinery/pkg/runtime/mapper.go | 98 + .../apimachinery/pkg/runtime/register.go | 61 + .../pkg/runtime/schema/generated.pb.go | 63 + .../pkg/runtime/schema/generated.proto | 26 + .../pkg/runtime/schema/group_version.go | 300 + .../pkg/runtime/schema/interfaces.go | 40 + .../k8s.io/apimachinery/pkg/runtime/scheme.go | 754 + .../pkg/runtime/scheme_builder.go | 48 + .../pkg/runtime/serializer/codec_factory.go | 274 + .../pkg/runtime/serializer/json/json.go | 368 + .../pkg/runtime/serializer/json/meta.go | 63 + .../runtime/serializer/negotiated_codec.go | 43 + .../pkg/runtime/serializer/protobuf/doc.go | 18 + .../runtime/serializer/protobuf/protobuf.go | 456 + .../serializer/recognizer/recognizer.go | 127 + .../runtime/serializer/streaming/streaming.go | 137 + .../serializer/versioning/versioning.go | 240 + .../pkg/runtime/swagger_doc_generator.go | 262 + .../k8s.io/apimachinery/pkg/runtime/types.go | 139 + .../apimachinery/pkg/runtime/types_proto.go | 69 + .../pkg/runtime/zz_generated.deepcopy.go | 108 + .../apimachinery/pkg/selection/operator.go | 33 + vendor/k8s.io/apimachinery/pkg/types/doc.go | 18 + .../apimachinery/pkg/types/namespacedname.go | 43 + .../k8s.io/apimachinery/pkg/types/nodename.go | 43 + vendor/k8s.io/apimachinery/pkg/types/patch.go | 29 + vendor/k8s.io/apimachinery/pkg/types/uid.go | 22 + .../apimachinery/pkg/util/cache/cache.go | 83 + .../pkg/util/cache/lruexpirecache.go | 102 + .../apimachinery/pkg/util/clock/clock.go | 348 + .../k8s.io/apimachinery/pkg/util/diff/diff.go | 118 + .../apimachinery/pkg/util/errors/doc.go | 18 + .../apimachinery/pkg/util/errors/errors.go | 229 + .../apimachinery/pkg/util/framer/framer.go | 167 + .../pkg/util/intstr/generated.pb.go | 362 + .../pkg/util/intstr/generated.proto | 43 + .../apimachinery/pkg/util/intstr/intstr.go | 184 + .../k8s.io/apimachinery/pkg/util/json/json.go | 119 + .../apimachinery/pkg/util/mergepatch/OWNERS | 7 + .../pkg/util/mergepatch/errors.go | 102 + 
.../apimachinery/pkg/util/mergepatch/util.go | 133 + .../pkg/util/naming/from_stack.go | 93 + .../k8s.io/apimachinery/pkg/util/net/http.go | 445 + .../apimachinery/pkg/util/net/interface.go | 416 + .../apimachinery/pkg/util/net/port_range.go | 149 + .../apimachinery/pkg/util/net/port_split.go | 77 + .../k8s.io/apimachinery/pkg/util/net/util.go | 56 + .../apimachinery/pkg/util/runtime/runtime.go | 168 + .../k8s.io/apimachinery/pkg/util/sets/byte.go | 203 + .../k8s.io/apimachinery/pkg/util/sets/doc.go | 20 + .../apimachinery/pkg/util/sets/empty.go | 23 + .../k8s.io/apimachinery/pkg/util/sets/int.go | 203 + .../apimachinery/pkg/util/sets/int32.go | 203 + .../apimachinery/pkg/util/sets/int64.go | 203 + .../apimachinery/pkg/util/sets/string.go | 203 + .../pkg/util/strategicpatch/OWNERS | 8 + .../pkg/util/strategicpatch/errors.go | 49 + .../pkg/util/strategicpatch/meta.go | 194 + .../pkg/util/strategicpatch/patch.go | 2174 + .../pkg/util/strategicpatch/types.go | 193 + .../pkg/util/validation/field/errors.go | 259 + .../pkg/util/validation/field/path.go | 91 + .../pkg/util/validation/validation.go | 416 + .../k8s.io/apimachinery/pkg/util/wait/doc.go | 19 + .../k8s.io/apimachinery/pkg/util/wait/wait.go | 504 + .../apimachinery/pkg/util/yaml/decoder.go | 344 + vendor/k8s.io/apimachinery/pkg/version/doc.go | 20 + .../apimachinery/pkg/version/helpers.go | 88 + .../k8s.io/apimachinery/pkg/version/types.go | 37 + vendor/k8s.io/apimachinery/pkg/watch/doc.go | 19 + .../k8s.io/apimachinery/pkg/watch/filter.go | 105 + vendor/k8s.io/apimachinery/pkg/watch/mux.go | 260 + .../apimachinery/pkg/watch/streamwatcher.go | 132 + vendor/k8s.io/apimachinery/pkg/watch/watch.go | 322 + .../pkg/watch/zz_generated.deepcopy.go | 40 + .../third_party/forked/golang/json/OWNERS | 7 + .../third_party/forked/golang/json/fields.go | 513 + .../forked/golang/reflect/deep_equal.go | 388 + vendor/k8s.io/client-go/LICENSE | 202 + .../client-go/discovery/discovery_client.go | 508 + 
vendor/k8s.io/client-go/discovery/doc.go | 19 + .../client-go/discovery/fake/discovery.go | 160 + vendor/k8s.io/client-go/discovery/helper.go | 125 + .../k8s.io/client-go/kubernetes/clientset.go | 580 + vendor/k8s.io/client-go/kubernetes/doc.go | 20 + .../kubernetes/fake/clientset_generated.go | 327 + .../k8s.io/client-go/kubernetes/fake/doc.go | 20 + .../client-go/kubernetes/fake/register.go | 126 + vendor/k8s.io/client-go/kubernetes/import.go | 19 + .../k8s.io/client-go/kubernetes/scheme/doc.go | 20 + .../client-go/kubernetes/scheme/register.go | 126 + .../v1beta1/admissionregistration_client.go | 94 + .../admissionregistration/v1beta1/doc.go | 20 + .../admissionregistration/v1beta1/fake/doc.go | 20 + .../fake/fake_admissionregistration_client.go | 44 + .../fake/fake_mutatingwebhookconfiguration.go | 120 + .../fake_validatingwebhookconfiguration.go | 120 + .../v1beta1/generated_expansion.go | 23 + .../v1beta1/mutatingwebhookconfiguration.go | 164 + .../v1beta1/validatingwebhookconfiguration.go | 164 + .../kubernetes/typed/apps/v1/apps_client.go | 109 + .../typed/apps/v1/controllerrevision.go | 174 + .../kubernetes/typed/apps/v1/daemonset.go | 191 + .../kubernetes/typed/apps/v1/deployment.go | 223 + .../client-go/kubernetes/typed/apps/v1/doc.go | 20 + .../kubernetes/typed/apps/v1/fake/doc.go | 20 + .../typed/apps/v1/fake/fake_apps_client.go | 56 + .../apps/v1/fake/fake_controllerrevision.go | 128 + .../typed/apps/v1/fake/fake_daemonset.go | 140 + .../typed/apps/v1/fake/fake_deployment.go | 163 + .../typed/apps/v1/fake/fake_replicaset.go | 163 + .../typed/apps/v1/fake/fake_statefulset.go | 163 + .../typed/apps/v1/generated_expansion.go | 29 + .../kubernetes/typed/apps/v1/replicaset.go | 223 + .../kubernetes/typed/apps/v1/statefulset.go | 223 + .../typed/apps/v1beta1/apps_client.go | 99 + .../typed/apps/v1beta1/controllerrevision.go | 174 + .../typed/apps/v1beta1/deployment.go | 191 + .../kubernetes/typed/apps/v1beta1/doc.go | 20 + 
.../kubernetes/typed/apps/v1beta1/fake/doc.go | 20 + .../apps/v1beta1/fake/fake_apps_client.go | 48 + .../v1beta1/fake/fake_controllerrevision.go | 128 + .../apps/v1beta1/fake/fake_deployment.go | 140 + .../apps/v1beta1/fake/fake_statefulset.go | 140 + .../typed/apps/v1beta1/generated_expansion.go | 25 + .../typed/apps/v1beta1/statefulset.go | 191 + .../typed/apps/v1beta2/apps_client.go | 109 + .../typed/apps/v1beta2/controllerrevision.go | 174 + .../typed/apps/v1beta2/daemonset.go | 191 + .../typed/apps/v1beta2/deployment.go | 191 + .../kubernetes/typed/apps/v1beta2/doc.go | 20 + .../kubernetes/typed/apps/v1beta2/fake/doc.go | 20 + .../apps/v1beta2/fake/fake_apps_client.go | 56 + .../v1beta2/fake/fake_controllerrevision.go | 128 + .../typed/apps/v1beta2/fake/fake_daemonset.go | 140 + .../apps/v1beta2/fake/fake_deployment.go | 140 + .../apps/v1beta2/fake/fake_replicaset.go | 140 + .../apps/v1beta2/fake/fake_statefulset.go | 162 + .../typed/apps/v1beta2/generated_expansion.go | 29 + .../typed/apps/v1beta2/replicaset.go | 191 + .../typed/apps/v1beta2/statefulset.go | 222 + .../v1alpha1/auditregistration_client.go | 89 + .../auditregistration/v1alpha1/auditsink.go | 164 + .../typed/auditregistration/v1alpha1/doc.go | 20 + .../auditregistration/v1alpha1/fake/doc.go | 20 + .../fake/fake_auditregistration_client.go | 40 + .../v1alpha1/fake/fake_auditsink.go | 120 + .../v1alpha1/generated_expansion.go | 21 + .../v1/authentication_client.go | 89 + .../kubernetes/typed/authentication/v1/doc.go | 20 + .../typed/authentication/v1/fake/doc.go | 20 + .../v1/fake/fake_authentication_client.go | 40 + .../v1/fake/fake_tokenreview.go | 24 + .../v1/fake/fake_tokenreview_expansion.go | 27 + .../authentication/v1/generated_expansion.go | 19 + .../typed/authentication/v1/tokenreview.go | 46 + .../v1/tokenreview_expansion.go | 35 + .../v1beta1/authentication_client.go | 89 + .../typed/authentication/v1beta1/doc.go | 20 + .../typed/authentication/v1beta1/fake/doc.go | 20 + 
.../fake/fake_authentication_client.go | 40 + .../v1beta1/fake/fake_tokenreview.go | 24 + .../fake/fake_tokenreview_expansion.go | 27 + .../v1beta1/generated_expansion.go | 19 + .../authentication/v1beta1/tokenreview.go | 46 + .../v1beta1/tokenreview_expansion.go | 35 + .../authorization/v1/authorization_client.go | 104 + .../kubernetes/typed/authorization/v1/doc.go | 20 + .../typed/authorization/v1/fake/doc.go | 20 + .../v1/fake/fake_authorization_client.go | 52 + .../v1/fake/fake_localsubjectaccessreview.go | 25 + ...fake_localsubjectaccessreview_expansion.go | 27 + .../v1/fake/fake_selfsubjectaccessreview.go | 24 + .../fake_selfsubjectaccessreview_expansion.go | 27 + .../v1/fake/fake_selfsubjectrulesreview.go | 24 + .../fake_selfsubjectrulesreview_expansion.go | 27 + .../v1/fake/fake_subjectaccessreview.go | 24 + .../fake_subjectaccessreview_expansion.go | 30 + .../authorization/v1/generated_expansion.go | 19 + .../v1/localsubjectaccessreview.go | 48 + .../v1/localsubjectaccessreview_expansion.go | 36 + .../v1/selfsubjectaccessreview.go | 46 + .../v1/selfsubjectaccessreview_expansion.go | 35 + .../v1/selfsubjectrulesreview.go | 46 + .../v1/selfsubjectrulesreview_expansion.go | 35 + .../authorization/v1/subjectaccessreview.go | 46 + .../v1/subjectaccessreview_expansion.go | 36 + .../v1beta1/authorization_client.go | 104 + .../typed/authorization/v1beta1/doc.go | 20 + .../typed/authorization/v1beta1/fake/doc.go | 20 + .../v1beta1/fake/fake_authorization_client.go | 52 + .../v1beta1/fake/fake_generated_expansion.go | 17 + .../fake/fake_localsubjectaccessreview.go | 25 + ...fake_localsubjectaccessreview_expansion.go | 27 + .../fake/fake_selfsubjectaccessreview.go | 24 + .../fake_selfsubjectaccessreview_expansion.go | 27 + .../fake/fake_selfsubjectrulesreview.go | 24 + .../fake_selfsubjectrulesreview_expansion.go | 27 + .../v1beta1/fake/fake_subjectaccessreview.go | 24 + .../fake_subjectaccessreview_expansion.go | 27 + .../v1beta1/generated_expansion.go | 19 + 
.../v1beta1/localsubjectaccessreview.go | 48 + .../localsubjectaccessreview_expansion.go | 36 + .../v1beta1/selfsubjectaccessreview.go | 46 + .../selfsubjectaccessreview_expansion.go | 35 + .../v1beta1/selfsubjectrulesreview.go | 46 + .../selfsubjectrulesreview_expansion.go | 35 + .../v1beta1/subjectaccessreview.go | 46 + .../v1beta1/subjectaccessreview_expansion.go | 36 + .../autoscaling/v1/autoscaling_client.go | 89 + .../kubernetes/typed/autoscaling/v1/doc.go | 20 + .../typed/autoscaling/v1/fake/doc.go | 20 + .../v1/fake/fake_autoscaling_client.go | 40 + .../v1/fake/fake_horizontalpodautoscaler.go | 140 + .../autoscaling/v1/generated_expansion.go | 21 + .../autoscaling/v1/horizontalpodautoscaler.go | 191 + .../autoscaling/v2beta1/autoscaling_client.go | 89 + .../typed/autoscaling/v2beta1/doc.go | 20 + .../typed/autoscaling/v2beta1/fake/doc.go | 20 + .../v2beta1/fake/fake_autoscaling_client.go | 40 + .../fake/fake_horizontalpodautoscaler.go | 140 + .../v2beta1/generated_expansion.go | 21 + .../v2beta1/horizontalpodautoscaler.go | 191 + .../autoscaling/v2beta2/autoscaling_client.go | 89 + .../typed/autoscaling/v2beta2/doc.go | 20 + .../typed/autoscaling/v2beta2/fake/doc.go | 20 + .../v2beta2/fake/fake_autoscaling_client.go | 40 + .../fake/fake_horizontalpodautoscaler.go | 140 + .../v2beta2/generated_expansion.go | 21 + .../v2beta2/horizontalpodautoscaler.go | 191 + .../kubernetes/typed/batch/v1/batch_client.go | 89 + .../kubernetes/typed/batch/v1/doc.go | 20 + .../kubernetes/typed/batch/v1/fake/doc.go | 20 + .../typed/batch/v1/fake/fake_batch_client.go | 40 + .../typed/batch/v1/fake/fake_job.go | 140 + .../typed/batch/v1/generated_expansion.go | 21 + .../kubernetes/typed/batch/v1/job.go | 191 + .../typed/batch/v1beta1/batch_client.go | 89 + .../kubernetes/typed/batch/v1beta1/cronjob.go | 191 + .../kubernetes/typed/batch/v1beta1/doc.go | 20 + .../typed/batch/v1beta1/fake/doc.go | 20 + .../batch/v1beta1/fake/fake_batch_client.go | 40 + 
.../typed/batch/v1beta1/fake/fake_cronjob.go | 140 + .../batch/v1beta1/generated_expansion.go | 21 + .../typed/batch/v2alpha1/batch_client.go | 89 + .../typed/batch/v2alpha1/cronjob.go | 191 + .../kubernetes/typed/batch/v2alpha1/doc.go | 20 + .../typed/batch/v2alpha1/fake/doc.go | 20 + .../batch/v2alpha1/fake/fake_batch_client.go | 40 + .../typed/batch/v2alpha1/fake/fake_cronjob.go | 140 + .../batch/v2alpha1/generated_expansion.go | 21 + .../v1beta1/certificates_client.go | 89 + .../v1beta1/certificatesigningrequest.go | 180 + .../certificatesigningrequest_expansion.go | 37 + .../typed/certificates/v1beta1/doc.go | 20 + .../typed/certificates/v1beta1/fake/doc.go | 20 + .../v1beta1/fake/fake_certificates_client.go | 40 + .../fake/fake_certificatesigningrequest.go | 131 + ...ake_certificatesigningrequest_expansion.go | 31 + .../v1beta1/generated_expansion.go | 19 + .../coordination/v1/coordination_client.go | 89 + .../kubernetes/typed/coordination/v1/doc.go | 20 + .../typed/coordination/v1/fake/doc.go | 20 + .../v1/fake/fake_coordination_client.go | 40 + .../typed/coordination/v1/fake/fake_lease.go | 128 + .../coordination/v1/generated_expansion.go | 21 + .../kubernetes/typed/coordination/v1/lease.go | 174 + .../v1beta1/coordination_client.go | 89 + .../typed/coordination/v1beta1/doc.go | 20 + .../typed/coordination/v1beta1/fake/doc.go | 20 + .../v1beta1/fake/fake_coordination_client.go | 40 + .../coordination/v1beta1/fake/fake_lease.go | 128 + .../v1beta1/generated_expansion.go | 21 + .../typed/coordination/v1beta1/lease.go | 174 + .../typed/core/v1/componentstatus.go | 164 + .../kubernetes/typed/core/v1/configmap.go | 174 + .../kubernetes/typed/core/v1/core_client.go | 164 + .../client-go/kubernetes/typed/core/v1/doc.go | 20 + .../kubernetes/typed/core/v1/endpoints.go | 174 + .../kubernetes/typed/core/v1/event.go | 174 + .../typed/core/v1/event_expansion.go | 164 + .../kubernetes/typed/core/v1/fake/doc.go | 20 + .../core/v1/fake/fake_componentstatus.go | 120 + 
.../typed/core/v1/fake/fake_configmap.go | 128 + .../typed/core/v1/fake/fake_core_client.go | 100 + .../typed/core/v1/fake/fake_endpoints.go | 128 + .../typed/core/v1/fake/fake_event.go | 128 + .../core/v1/fake/fake_event_expansion.go | 93 + .../typed/core/v1/fake/fake_limitrange.go | 128 + .../typed/core/v1/fake/fake_namespace.go | 123 + .../core/v1/fake/fake_namespace_expansion.go | 37 + .../typed/core/v1/fake/fake_node.go | 131 + .../typed/core/v1/fake/fake_node_expansion.go | 36 + .../core/v1/fake/fake_persistentvolume.go | 131 + .../v1/fake/fake_persistentvolumeclaim.go | 140 + .../kubernetes/typed/core/v1/fake/fake_pod.go | 140 + .../typed/core/v1/fake/fake_pod_expansion.go | 70 + .../typed/core/v1/fake/fake_podtemplate.go | 128 + .../v1/fake/fake_replicationcontroller.go | 163 + .../typed/core/v1/fake/fake_resourcequota.go | 140 + .../typed/core/v1/fake/fake_secret.go | 128 + .../typed/core/v1/fake/fake_service.go | 132 + .../core/v1/fake/fake_service_expansion.go | 26 + .../typed/core/v1/fake/fake_serviceaccount.go | 128 + .../v1/fake/fake_serviceaccount_expansion.go | 31 + .../typed/core/v1/generated_expansion.go | 39 + .../kubernetes/typed/core/v1/limitrange.go | 174 + .../kubernetes/typed/core/v1/namespace.go | 164 + .../typed/core/v1/namespace_expansion.go | 31 + .../kubernetes/typed/core/v1/node.go | 180 + .../typed/core/v1/node_expansion.go | 43 + .../typed/core/v1/persistentvolume.go | 180 + .../typed/core/v1/persistentvolumeclaim.go | 191 + .../client-go/kubernetes/typed/core/v1/pod.go | 191 + .../kubernetes/typed/core/v1/pod_expansion.go | 45 + .../kubernetes/typed/core/v1/podtemplate.go | 174 + .../typed/core/v1/replicationcontroller.go | 223 + .../kubernetes/typed/core/v1/resourcequota.go | 191 + .../kubernetes/typed/core/v1/secret.go | 174 + .../kubernetes/typed/core/v1/service.go | 174 + .../typed/core/v1/service_expansion.go | 41 + .../typed/core/v1/serviceaccount.go | 174 + .../typed/core/v1/serviceaccount_expansion.go | 41 + 
.../kubernetes/typed/events/v1beta1/doc.go | 20 + .../kubernetes/typed/events/v1beta1/event.go | 174 + .../typed/events/v1beta1/event_expansion.go | 98 + .../typed/events/v1beta1/events_client.go | 89 + .../typed/events/v1beta1/fake/doc.go | 20 + .../typed/events/v1beta1/fake/fake_event.go | 128 + .../v1beta1/fake/fake_event_expansion.go | 66 + .../events/v1beta1/fake/fake_events_client.go | 40 + .../events/v1beta1/generated_expansion.go | 19 + .../typed/extensions/v1beta1/daemonset.go | 191 + .../typed/extensions/v1beta1/deployment.go | 222 + .../v1beta1/deployment_expansion.go | 29 + .../typed/extensions/v1beta1/doc.go | 20 + .../extensions/v1beta1/extensions_client.go | 114 + .../typed/extensions/v1beta1/fake/doc.go | 20 + .../extensions/v1beta1/fake/fake_daemonset.go | 140 + .../v1beta1/fake/fake_deployment.go | 162 + .../v1beta1/fake/fake_deployment_expansion.go | 33 + .../v1beta1/fake/fake_extensions_client.go | 60 + .../extensions/v1beta1/fake/fake_ingress.go | 140 + .../v1beta1/fake/fake_networkpolicy.go | 128 + .../v1beta1/fake/fake_podsecuritypolicy.go | 120 + .../v1beta1/fake/fake_replicaset.go | 162 + .../extensions/v1beta1/generated_expansion.go | 29 + .../typed/extensions/v1beta1/ingress.go | 191 + .../typed/extensions/v1beta1/networkpolicy.go | 174 + .../extensions/v1beta1/podsecuritypolicy.go | 164 + .../typed/extensions/v1beta1/replicaset.go | 222 + .../kubernetes/typed/networking/v1/doc.go | 20 + .../typed/networking/v1/fake/doc.go | 20 + .../v1/fake/fake_networking_client.go | 40 + .../networking/v1/fake/fake_networkpolicy.go | 128 + .../networking/v1/generated_expansion.go | 21 + .../typed/networking/v1/networking_client.go | 89 + .../typed/networking/v1/networkpolicy.go | 174 + .../typed/networking/v1beta1/doc.go | 20 + .../typed/networking/v1beta1/fake/doc.go | 20 + .../networking/v1beta1/fake/fake_ingress.go | 140 + .../v1beta1/fake/fake_networking_client.go | 40 + .../networking/v1beta1/generated_expansion.go | 21 + 
.../typed/networking/v1beta1/ingress.go | 191 + .../networking/v1beta1/networking_client.go | 89 + .../kubernetes/typed/node/v1alpha1/doc.go | 20 + .../typed/node/v1alpha1/fake/doc.go | 20 + .../node/v1alpha1/fake/fake_node_client.go | 40 + .../node/v1alpha1/fake/fake_runtimeclass.go | 120 + .../node/v1alpha1/generated_expansion.go | 21 + .../typed/node/v1alpha1/node_client.go | 89 + .../typed/node/v1alpha1/runtimeclass.go | 164 + .../kubernetes/typed/node/v1beta1/doc.go | 20 + .../kubernetes/typed/node/v1beta1/fake/doc.go | 20 + .../node/v1beta1/fake/fake_node_client.go | 40 + .../node/v1beta1/fake/fake_runtimeclass.go | 120 + .../typed/node/v1beta1/generated_expansion.go | 21 + .../typed/node/v1beta1/node_client.go | 89 + .../typed/node/v1beta1/runtimeclass.go | 164 + .../kubernetes/typed/policy/v1beta1/doc.go | 20 + .../typed/policy/v1beta1/eviction.go | 48 + .../policy/v1beta1/eviction_expansion.go | 38 + .../typed/policy/v1beta1/fake/doc.go | 20 + .../policy/v1beta1/fake/fake_eviction.go | 25 + .../v1beta1/fake/fake_eviction_expansion.go | 35 + .../v1beta1/fake/fake_poddisruptionbudget.go | 140 + .../v1beta1/fake/fake_podsecuritypolicy.go | 120 + .../policy/v1beta1/fake/fake_policy_client.go | 48 + .../policy/v1beta1/generated_expansion.go | 23 + .../policy/v1beta1/poddisruptionbudget.go | 191 + .../typed/policy/v1beta1/podsecuritypolicy.go | 164 + .../typed/policy/v1beta1/policy_client.go | 99 + .../kubernetes/typed/rbac/v1/clusterrole.go | 164 + .../typed/rbac/v1/clusterrolebinding.go | 164 + .../client-go/kubernetes/typed/rbac/v1/doc.go | 20 + .../kubernetes/typed/rbac/v1/fake/doc.go | 20 + .../typed/rbac/v1/fake/fake_clusterrole.go | 120 + .../rbac/v1/fake/fake_clusterrolebinding.go | 120 + .../typed/rbac/v1/fake/fake_rbac_client.go | 52 + .../typed/rbac/v1/fake/fake_role.go | 128 + .../typed/rbac/v1/fake/fake_rolebinding.go | 128 + .../typed/rbac/v1/generated_expansion.go | 27 + .../kubernetes/typed/rbac/v1/rbac_client.go | 104 + 
.../kubernetes/typed/rbac/v1/role.go | 174 + .../kubernetes/typed/rbac/v1/rolebinding.go | 174 + .../typed/rbac/v1alpha1/clusterrole.go | 164 + .../typed/rbac/v1alpha1/clusterrolebinding.go | 164 + .../kubernetes/typed/rbac/v1alpha1/doc.go | 20 + .../typed/rbac/v1alpha1/fake/doc.go | 20 + .../rbac/v1alpha1/fake/fake_clusterrole.go | 120 + .../v1alpha1/fake/fake_clusterrolebinding.go | 120 + .../rbac/v1alpha1/fake/fake_rbac_client.go | 52 + .../typed/rbac/v1alpha1/fake/fake_role.go | 128 + .../rbac/v1alpha1/fake/fake_rolebinding.go | 128 + .../rbac/v1alpha1/generated_expansion.go | 27 + .../typed/rbac/v1alpha1/rbac_client.go | 104 + .../kubernetes/typed/rbac/v1alpha1/role.go | 174 + .../typed/rbac/v1alpha1/rolebinding.go | 174 + .../typed/rbac/v1beta1/clusterrole.go | 164 + .../typed/rbac/v1beta1/clusterrolebinding.go | 164 + .../kubernetes/typed/rbac/v1beta1/doc.go | 20 + .../kubernetes/typed/rbac/v1beta1/fake/doc.go | 20 + .../rbac/v1beta1/fake/fake_clusterrole.go | 120 + .../v1beta1/fake/fake_clusterrolebinding.go | 120 + .../rbac/v1beta1/fake/fake_rbac_client.go | 52 + .../typed/rbac/v1beta1/fake/fake_role.go | 128 + .../rbac/v1beta1/fake/fake_rolebinding.go | 128 + .../typed/rbac/v1beta1/generated_expansion.go | 27 + .../typed/rbac/v1beta1/rbac_client.go | 104 + .../kubernetes/typed/rbac/v1beta1/role.go | 174 + .../typed/rbac/v1beta1/rolebinding.go | 174 + .../kubernetes/typed/scheduling/v1/doc.go | 20 + .../typed/scheduling/v1/fake/doc.go | 20 + .../scheduling/v1/fake/fake_priorityclass.go | 120 + .../v1/fake/fake_scheduling_client.go | 40 + .../scheduling/v1/generated_expansion.go | 21 + .../typed/scheduling/v1/priorityclass.go | 164 + .../typed/scheduling/v1/scheduling_client.go | 89 + .../typed/scheduling/v1alpha1/doc.go | 20 + .../typed/scheduling/v1alpha1/fake/doc.go | 20 + .../v1alpha1/fake/fake_priorityclass.go | 120 + .../v1alpha1/fake/fake_scheduling_client.go | 40 + .../v1alpha1/generated_expansion.go | 21 + .../scheduling/v1alpha1/priorityclass.go | 
164 + .../scheduling/v1alpha1/scheduling_client.go | 89 + .../typed/scheduling/v1beta1/doc.go | 20 + .../typed/scheduling/v1beta1/fake/doc.go | 20 + .../v1beta1/fake/fake_priorityclass.go | 120 + .../v1beta1/fake/fake_scheduling_client.go | 40 + .../scheduling/v1beta1/generated_expansion.go | 21 + .../typed/scheduling/v1beta1/priorityclass.go | 164 + .../scheduling/v1beta1/scheduling_client.go | 89 + .../kubernetes/typed/settings/v1alpha1/doc.go | 20 + .../typed/settings/v1alpha1/fake/doc.go | 20 + .../settings/v1alpha1/fake/fake_podpreset.go | 128 + .../v1alpha1/fake/fake_settings_client.go | 40 + .../settings/v1alpha1/generated_expansion.go | 21 + .../typed/settings/v1alpha1/podpreset.go | 174 + .../settings/v1alpha1/settings_client.go | 89 + .../kubernetes/typed/storage/v1/doc.go | 20 + .../kubernetes/typed/storage/v1/fake/doc.go | 20 + .../storage/v1/fake/fake_storage_client.go | 44 + .../storage/v1/fake/fake_storageclass.go | 120 + .../storage/v1/fake/fake_volumeattachment.go | 131 + .../typed/storage/v1/generated_expansion.go | 23 + .../typed/storage/v1/storage_client.go | 94 + .../typed/storage/v1/storageclass.go | 164 + .../typed/storage/v1/volumeattachment.go | 180 + .../kubernetes/typed/storage/v1alpha1/doc.go | 20 + .../typed/storage/v1alpha1/fake/doc.go | 20 + .../v1alpha1/fake/fake_storage_client.go | 40 + .../v1alpha1/fake/fake_volumeattachment.go | 131 + .../storage/v1alpha1/generated_expansion.go | 21 + .../typed/storage/v1alpha1/storage_client.go | 89 + .../storage/v1alpha1/volumeattachment.go | 180 + .../typed/storage/v1beta1/csidriver.go | 164 + .../typed/storage/v1beta1/csinode.go | 164 + .../kubernetes/typed/storage/v1beta1/doc.go | 20 + .../typed/storage/v1beta1/fake/doc.go | 20 + .../storage/v1beta1/fake/fake_csidriver.go | 120 + .../storage/v1beta1/fake/fake_csinode.go | 120 + .../v1beta1/fake/fake_storage_client.go | 52 + .../storage/v1beta1/fake/fake_storageclass.go | 120 + .../v1beta1/fake/fake_volumeattachment.go | 131 + 
.../storage/v1beta1/generated_expansion.go | 27 + .../typed/storage/v1beta1/storage_client.go | 104 + .../typed/storage/v1beta1/storageclass.go | 164 + .../typed/storage/v1beta1/volumeattachment.go | 180 + .../pkg/apis/clientauthentication/OWNERS | 9 + .../pkg/apis/clientauthentication/doc.go | 20 + .../pkg/apis/clientauthentication/register.go | 50 + .../pkg/apis/clientauthentication/types.go | 77 + .../apis/clientauthentication/v1alpha1/doc.go | 24 + .../clientauthentication/v1alpha1/register.go | 55 + .../clientauthentication/v1alpha1/types.go | 78 + .../v1alpha1/zz_generated.conversion.go | 176 + .../v1alpha1/zz_generated.deepcopy.go | 128 + .../v1alpha1/zz_generated.defaults.go | 32 + .../v1beta1/conversion.go | 26 + .../apis/clientauthentication/v1beta1/doc.go | 24 + .../clientauthentication/v1beta1/register.go | 55 + .../clientauthentication/v1beta1/types.go | 59 + .../v1beta1/zz_generated.conversion.go | 142 + .../v1beta1/zz_generated.deepcopy.go | 92 + .../v1beta1/zz_generated.defaults.go | 32 + .../zz_generated.deepcopy.go | 128 + .../client-go/pkg/version/.gitattributes | 1 + vendor/k8s.io/client-go/pkg/version/base.go | 63 + vendor/k8s.io/client-go/pkg/version/def.bzl | 38 + vendor/k8s.io/client-go/pkg/version/doc.go | 21 + .../k8s.io/client-go/pkg/version/version.go | 42 + .../plugin/pkg/client/auth/exec/exec.go | 360 + .../plugin/pkg/client/auth/gcp/OWNERS | 8 + .../plugin/pkg/client/auth/gcp/gcp.go | 383 + .../plugin/pkg/client/auth/oidc/OWNERS | 7 + .../plugin/pkg/client/auth/oidc/oidc.go | 379 + .../pkg/client/auth/openstack/openstack.go | 193 + vendor/k8s.io/client-go/rest/OWNERS | 26 + vendor/k8s.io/client-go/rest/client.go | 258 + vendor/k8s.io/client-go/rest/config.go | 549 + vendor/k8s.io/client-go/rest/plugin.go | 73 + vendor/k8s.io/client-go/rest/request.go | 1206 + vendor/k8s.io/client-go/rest/transport.go | 118 + vendor/k8s.io/client-go/rest/url_utils.go | 97 + vendor/k8s.io/client-go/rest/urlbackoff.go | 107 + 
vendor/k8s.io/client-go/rest/watch/decoder.go | 72 + vendor/k8s.io/client-go/rest/watch/encoder.go | 56 + .../client-go/rest/zz_generated.deepcopy.go | 52 + vendor/k8s.io/client-go/testing/actions.go | 671 + vendor/k8s.io/client-go/testing/fake.go | 216 + vendor/k8s.io/client-go/testing/fixture.go | 557 + .../forked/golang/template/exec.go | 94 + .../forked/golang/template/funcs.go | 599 + vendor/k8s.io/client-go/tools/auth/OWNERS | 9 + .../k8s.io/client-go/tools/auth/clientauth.go | 126 + vendor/k8s.io/client-go/tools/cache/OWNERS | 51 + .../client-go/tools/cache/controller.go | 380 + .../client-go/tools/cache/delta_fifo.go | 655 + vendor/k8s.io/client-go/tools/cache/doc.go | 24 + .../client-go/tools/cache/expiration_cache.go | 216 + .../tools/cache/expiration_cache_fakes.go | 54 + .../tools/cache/fake_custom_store.go | 102 + vendor/k8s.io/client-go/tools/cache/fifo.go | 358 + vendor/k8s.io/client-go/tools/cache/heap.go | 323 + vendor/k8s.io/client-go/tools/cache/index.go | 87 + .../k8s.io/client-go/tools/cache/listers.go | 180 + .../k8s.io/client-go/tools/cache/listwatch.go | 114 + .../client-go/tools/cache/mutation_cache.go | 261 + .../tools/cache/mutation_detector.go | 130 + .../k8s.io/client-go/tools/cache/reflector.go | 401 + .../tools/cache/reflector_metrics.go | 102 + .../client-go/tools/cache/shared_informer.go | 650 + vendor/k8s.io/client-go/tools/cache/store.go | 244 + .../tools/cache/thread_safe_store.go | 311 + .../client-go/tools/cache/undelta_store.go | 83 + .../client-go/tools/clientcmd/api/doc.go | 19 + .../client-go/tools/clientcmd/api/helpers.go | 188 + .../tools/clientcmd/api/latest/latest.go | 61 + .../client-go/tools/clientcmd/api/register.go | 46 + .../client-go/tools/clientcmd/api/types.go | 262 + .../tools/clientcmd/api/v1/conversion.go | 244 + .../client-go/tools/clientcmd/api/v1/doc.go | 19 + .../tools/clientcmd/api/v1/register.go | 56 + .../client-go/tools/clientcmd/api/v1/types.go | 203 + .../clientcmd/api/v1/zz_generated.deepcopy.go | 
348 + .../clientcmd/api/zz_generated.deepcopy.go | 324 + .../client-go/tools/clientcmd/auth_loaders.go | 111 + .../tools/clientcmd/client_config.go | 561 + .../client-go/tools/clientcmd/config.go | 490 + .../k8s.io/client-go/tools/clientcmd/doc.go | 37 + .../k8s.io/client-go/tools/clientcmd/flag.go | 49 + .../client-go/tools/clientcmd/helpers.go | 35 + .../client-go/tools/clientcmd/loader.go | 633 + .../tools/clientcmd/merged_client_builder.go | 173 + .../client-go/tools/clientcmd/overrides.go | 247 + .../client-go/tools/clientcmd/validation.go | 298 + vendor/k8s.io/client-go/tools/metrics/OWNERS | 9 + .../k8s.io/client-go/tools/metrics/metrics.go | 61 + vendor/k8s.io/client-go/tools/pager/pager.go | 231 + .../k8s.io/client-go/tools/reference/ref.go | 126 + vendor/k8s.io/client-go/transport/OWNERS | 9 + vendor/k8s.io/client-go/transport/cache.go | 117 + vendor/k8s.io/client-go/transport/config.go | 126 + .../client-go/transport/round_trippers.go | 564 + .../client-go/transport/token_source.go | 149 + .../k8s.io/client-go/transport/transport.go | 227 + vendor/k8s.io/client-go/util/cert/OWNERS | 9 + vendor/k8s.io/client-go/util/cert/cert.go | 206 + vendor/k8s.io/client-go/util/cert/csr.go | 75 + vendor/k8s.io/client-go/util/cert/io.go | 98 + vendor/k8s.io/client-go/util/cert/pem.go | 61 + .../util/connrotation/connrotation.go | 105 + .../client-go/util/flowcontrol/backoff.go | 149 + .../client-go/util/flowcontrol/throttle.go | 143 + .../k8s.io/client-go/util/homedir/homedir.go | 47 + vendor/k8s.io/client-go/util/jsonpath/doc.go | 20 + .../client-go/util/jsonpath/jsonpath.go | 525 + vendor/k8s.io/client-go/util/jsonpath/node.go | 256 + .../k8s.io/client-go/util/jsonpath/parser.go | 526 + vendor/k8s.io/client-go/util/keyutil/OWNERS | 7 + vendor/k8s.io/client-go/util/keyutil/key.go | 323 + vendor/k8s.io/client-go/util/retry/OWNERS | 4 + vendor/k8s.io/client-go/util/retry/util.go | 79 + vendor/k8s.io/klog/.travis.yml | 16 + vendor/k8s.io/klog/CONTRIBUTING.md | 22 + 
vendor/k8s.io/klog/LICENSE | 191 + vendor/k8s.io/klog/OWNERS | 19 + vendor/k8s.io/klog/README.md | 97 + vendor/k8s.io/klog/RELEASE.md | 9 + vendor/k8s.io/klog/SECURITY_CONTACTS | 20 + vendor/k8s.io/klog/code-of-conduct.md | 3 + vendor/k8s.io/klog/go.mod | 5 + vendor/k8s.io/klog/go.sum | 2 + vendor/k8s.io/klog/klog.go | 1316 + vendor/k8s.io/klog/klog_file.go | 139 + vendor/k8s.io/kube-openapi/LICENSE | 202 + .../k8s.io/kube-openapi/pkg/util/proto/OWNERS | 2 + .../k8s.io/kube-openapi/pkg/util/proto/doc.go | 19 + .../kube-openapi/pkg/util/proto/document.go | 303 + .../kube-openapi/pkg/util/proto/openapi.go | 278 + vendor/k8s.io/utils/LICENSE | 202 + vendor/k8s.io/utils/buffer/ring_growing.go | 72 + vendor/k8s.io/utils/integer/integer.go | 73 + vendor/k8s.io/utils/trace/trace.go | 97 + vendor/modules.txt | 571 + vendor/sigs.k8s.io/yaml/.gitignore | 20 + vendor/sigs.k8s.io/yaml/.travis.yml | 14 + vendor/sigs.k8s.io/yaml/CONTRIBUTING.md | 31 + vendor/sigs.k8s.io/yaml/LICENSE | 50 + vendor/sigs.k8s.io/yaml/OWNERS | 25 + vendor/sigs.k8s.io/yaml/README.md | 121 + vendor/sigs.k8s.io/yaml/RELEASE.md | 9 + vendor/sigs.k8s.io/yaml/SECURITY_CONTACTS | 17 + vendor/sigs.k8s.io/yaml/code-of-conduct.md | 3 + vendor/sigs.k8s.io/yaml/fields.go | 502 + vendor/sigs.k8s.io/yaml/yaml.go | 319 + vendor/sigs.k8s.io/yaml/yaml_go110.go | 14 + 3514 files changed, 1101622 insertions(+), 19 deletions(-) create mode 100644 vendor/cloud.google.com/go/LICENSE create mode 100644 vendor/cloud.google.com/go/compute/metadata/metadata.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/LICENSE create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/NOTICE create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/profiles/latest/dns/mgmt/dns/models.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns/client.go create mode 100644 
vendor/github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns/models.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns/recordsets.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns/resourcereference.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns/version.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns/zones.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/version/version.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/LICENSE create mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/LICENSE create mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/README.md create mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/config.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/go.mod create mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/go.sum create mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/go_mod_tidy_hack.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/persist.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/sender.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/token.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/version.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/authorization.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/authorization_sas.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/authorization_storage.go create mode 100644 
vendor/github.com/Azure/go-autorest/autorest/autorest.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/azure/async.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/azure/auth/LICENSE create mode 100644 vendor/github.com/Azure/go-autorest/autorest/azure/auth/auth.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/azure/auth/go.mod create mode 100644 vendor/github.com/Azure/go-autorest/autorest/azure/auth/go.sum create mode 100644 vendor/github.com/Azure/go-autorest/autorest/azure/auth/go_mod_tidy_hack.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/azure/azure.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/azure/cli/LICENSE create mode 100644 vendor/github.com/Azure/go-autorest/autorest/azure/cli/go.mod create mode 100644 vendor/github.com/Azure/go-autorest/autorest/azure/cli/go.sum create mode 100644 vendor/github.com/Azure/go-autorest/autorest/azure/cli/go_mod_tidy_hack.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/azure/cli/profile.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/azure/cli/token.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/azure/environments.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/azure/metadata_environment.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/azure/rp.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/client.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/date/LICENSE create mode 100644 vendor/github.com/Azure/go-autorest/autorest/date/date.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/date/go.mod create mode 100644 vendor/github.com/Azure/go-autorest/autorest/date/go.sum create mode 100644 
vendor/github.com/Azure/go-autorest/autorest/date/go_mod_tidy_hack.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/date/time.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/date/utility.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/error.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/go.mod create mode 100644 vendor/github.com/Azure/go-autorest/autorest/go.sum create mode 100644 vendor/github.com/Azure/go-autorest/autorest/preparer.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/responder.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/sender.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/to/LICENSE create mode 100644 vendor/github.com/Azure/go-autorest/autorest/to/convert.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/to/go.mod create mode 100644 vendor/github.com/Azure/go-autorest/autorest/utility.go create mode 100644 vendor/github.com/Azure/go-autorest/autorest/version.go create mode 100644 vendor/github.com/Azure/go-autorest/logger/LICENSE create mode 100644 vendor/github.com/Azure/go-autorest/logger/go.mod create mode 100644 vendor/github.com/Azure/go-autorest/logger/logger.go create mode 100644 vendor/github.com/Azure/go-autorest/tracing/LICENSE create mode 100644 
vendor/github.com/Azure/go-autorest/tracing/go.mod create mode 100644 vendor/github.com/Azure/go-autorest/tracing/tracing.go create mode 100644 vendor/github.com/DataDog/datadog-go/LICENSE.txt create mode 100644 vendor/github.com/DataDog/datadog-go/statsd/README.md create mode 100644 vendor/github.com/DataDog/datadog-go/statsd/options.go create mode 100644 vendor/github.com/DataDog/datadog-go/statsd/statsd.go create mode 100644 vendor/github.com/DataDog/datadog-go/statsd/udp.go create mode 100644 vendor/github.com/DataDog/datadog-go/statsd/uds.go create mode 100644 vendor/github.com/DataDog/datadog-go/statsd/uds_async.go create mode 100644 vendor/github.com/DataDog/datadog-go/statsd/uds_blocking.go create mode 100644 vendor/github.com/DataDog/zstd/.travis.yml create mode 100644 vendor/github.com/DataDog/zstd/LICENSE create mode 100644 vendor/github.com/DataDog/zstd/README.md create mode 100644 vendor/github.com/DataDog/zstd/ZSTD_LICENSE create mode 100644 vendor/github.com/DataDog/zstd/bitstream.h create mode 100644 vendor/github.com/DataDog/zstd/compiler.h create mode 100644 vendor/github.com/DataDog/zstd/cover.c create mode 100644 vendor/github.com/DataDog/zstd/cpu.h create mode 100644 vendor/github.com/DataDog/zstd/divsufsort.c create mode 100644 vendor/github.com/DataDog/zstd/divsufsort.h create mode 100644 vendor/github.com/DataDog/zstd/entropy_common.c create mode 100644 vendor/github.com/DataDog/zstd/error_private.c create mode 100644 vendor/github.com/DataDog/zstd/error_private.h create mode 100644 vendor/github.com/DataDog/zstd/errors.go create mode 100644 vendor/github.com/DataDog/zstd/fse.h create mode 100644 vendor/github.com/DataDog/zstd/fse_compress.c create mode 100644 vendor/github.com/DataDog/zstd/fse_decompress.c create mode 100644 
vendor/github.com/DataDog/zstd/huf.h create mode 100644 vendor/github.com/DataDog/zstd/huf_compress.c create mode 100644 vendor/github.com/DataDog/zstd/huf_decompress.c create mode 100644 vendor/github.com/DataDog/zstd/mem.h create mode 100644 vendor/github.com/DataDog/zstd/pool.c create mode 100644 vendor/github.com/DataDog/zstd/pool.h create mode 100644 vendor/github.com/DataDog/zstd/threading.c create mode 100644 vendor/github.com/DataDog/zstd/threading.h create mode 100644 vendor/github.com/DataDog/zstd/travis_test_32.sh create mode 100644 vendor/github.com/DataDog/zstd/update.txt create mode 100644 vendor/github.com/DataDog/zstd/xxhash.c create mode 100644 vendor/github.com/DataDog/zstd/xxhash.h create mode 100644 vendor/github.com/DataDog/zstd/zbuff.h create mode 100644 vendor/github.com/DataDog/zstd/zbuff_common.c create mode 100644 vendor/github.com/DataDog/zstd/zbuff_compress.c create mode 100644 vendor/github.com/DataDog/zstd/zbuff_decompress.c create mode 100644 vendor/github.com/DataDog/zstd/zdict.c create mode 100644 vendor/github.com/DataDog/zstd/zdict.h create mode 100644 vendor/github.com/DataDog/zstd/zstd.go create mode 100644 vendor/github.com/DataDog/zstd/zstd.h create mode 100644 vendor/github.com/DataDog/zstd/zstd_common.c create mode 100644 vendor/github.com/DataDog/zstd/zstd_compress.c create mode 100644 vendor/github.com/DataDog/zstd/zstd_compress_internal.h create mode 100644 vendor/github.com/DataDog/zstd/zstd_decompress.c create mode 100644 vendor/github.com/DataDog/zstd/zstd_double_fast.c create mode 100644 vendor/github.com/DataDog/zstd/zstd_double_fast.h create mode 100644 vendor/github.com/DataDog/zstd/zstd_errors.h create mode 100644 vendor/github.com/DataDog/zstd/zstd_fast.c create mode 100644 
vendor/github.com/DataDog/zstd/zstd_fast.h create mode 100644 vendor/github.com/DataDog/zstd/zstd_internal.h create mode 100644 vendor/github.com/DataDog/zstd/zstd_lazy.c create mode 100644 vendor/github.com/DataDog/zstd/zstd_lazy.h create mode 100644 vendor/github.com/DataDog/zstd/zstd_ldm.c create mode 100644 vendor/github.com/DataDog/zstd/zstd_ldm.h create mode 100644 vendor/github.com/DataDog/zstd/zstd_legacy.h create mode 100644 vendor/github.com/DataDog/zstd/zstd_opt.c create mode 100644 vendor/github.com/DataDog/zstd/zstd_opt.h create mode 100644 vendor/github.com/DataDog/zstd/zstd_stream.go create mode 100644 vendor/github.com/DataDog/zstd/zstd_v01.c create mode 100644 vendor/github.com/DataDog/zstd/zstd_v01.h create mode 100644 vendor/github.com/DataDog/zstd/zstd_v02.c create mode 100644 vendor/github.com/DataDog/zstd/zstd_v02.h create mode 100644 vendor/github.com/DataDog/zstd/zstd_v03.c create mode 100644 vendor/github.com/DataDog/zstd/zstd_v03.h create mode 100644 vendor/github.com/DataDog/zstd/zstd_v04.c create mode 100644 vendor/github.com/DataDog/zstd/zstd_v04.h create mode 100644 vendor/github.com/DataDog/zstd/zstd_v05.c create mode 100644 vendor/github.com/DataDog/zstd/zstd_v05.h create mode 100644 vendor/github.com/DataDog/zstd/zstd_v06.c create mode 100644 vendor/github.com/DataDog/zstd/zstd_v06.h create mode 100644 vendor/github.com/DataDog/zstd/zstd_v07.c create mode 100644 vendor/github.com/DataDog/zstd/zstd_v07.h create mode 100644 vendor/github.com/DataDog/zstd/zstdmt_compress.c create mode 100644 vendor/github.com/DataDog/zstd/zstdmt_compress.h create mode 100644 vendor/github.com/Shopify/sarama/.gitignore create mode 100644 vendor/github.com/Shopify/sarama/.travis.yml create mode 100644 
vendor/github.com/Shopify/sarama/CHANGELOG.md create mode 100644 vendor/github.com/Shopify/sarama/LICENSE create mode 100644 vendor/github.com/Shopify/sarama/Makefile create mode 100644 vendor/github.com/Shopify/sarama/README.md create mode 100644 vendor/github.com/Shopify/sarama/Vagrantfile create mode 100644 vendor/github.com/Shopify/sarama/acl_bindings.go create mode 100644 vendor/github.com/Shopify/sarama/acl_create_request.go create mode 100644 vendor/github.com/Shopify/sarama/acl_create_response.go create mode 100644 vendor/github.com/Shopify/sarama/acl_delete_request.go create mode 100644 vendor/github.com/Shopify/sarama/acl_delete_response.go create mode 100644 vendor/github.com/Shopify/sarama/acl_describe_request.go create mode 100644 vendor/github.com/Shopify/sarama/acl_describe_response.go create mode 100644 vendor/github.com/Shopify/sarama/acl_filter.go create mode 100644 vendor/github.com/Shopify/sarama/acl_types.go create mode 100644 vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go create mode 100644 vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go create mode 100644 vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go create mode 100644 vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go create mode 100644 vendor/github.com/Shopify/sarama/admin.go create mode 100644 vendor/github.com/Shopify/sarama/alter_configs_request.go create mode 100644 vendor/github.com/Shopify/sarama/alter_configs_response.go create mode 100644 vendor/github.com/Shopify/sarama/api_versions_request.go create mode 100644 vendor/github.com/Shopify/sarama/api_versions_response.go create mode 100644 vendor/github.com/Shopify/sarama/async_producer.go create mode 100644 vendor/github.com/Shopify/sarama/balance_strategy.go create 
mode 100644 vendor/github.com/Shopify/sarama/broker.go create mode 100644 vendor/github.com/Shopify/sarama/client.go create mode 100644 vendor/github.com/Shopify/sarama/compress.go create mode 100644 vendor/github.com/Shopify/sarama/config.go create mode 100644 vendor/github.com/Shopify/sarama/config_resource_type.go create mode 100644 vendor/github.com/Shopify/sarama/consumer.go create mode 100644 vendor/github.com/Shopify/sarama/consumer_group.go create mode 100644 vendor/github.com/Shopify/sarama/consumer_group_members.go create mode 100644 vendor/github.com/Shopify/sarama/consumer_metadata_request.go create mode 100644 vendor/github.com/Shopify/sarama/consumer_metadata_response.go create mode 100644 vendor/github.com/Shopify/sarama/crc32_field.go create mode 100644 vendor/github.com/Shopify/sarama/create_partitions_request.go create mode 100644 vendor/github.com/Shopify/sarama/create_partitions_response.go create mode 100644 vendor/github.com/Shopify/sarama/create_topics_request.go create mode 100644 vendor/github.com/Shopify/sarama/create_topics_response.go create mode 100644 vendor/github.com/Shopify/sarama/decompress.go create mode 100644 vendor/github.com/Shopify/sarama/delete_groups_request.go create mode 100644 vendor/github.com/Shopify/sarama/delete_groups_response.go create mode 100644 vendor/github.com/Shopify/sarama/delete_records_request.go create mode 100644 vendor/github.com/Shopify/sarama/delete_records_response.go create mode 100644 vendor/github.com/Shopify/sarama/delete_topics_request.go create mode 100644 vendor/github.com/Shopify/sarama/delete_topics_response.go create mode 100644 vendor/github.com/Shopify/sarama/describe_configs_request.go create mode 100644 vendor/github.com/Shopify/sarama/describe_configs_response.go create mode 100644 
vendor/github.com/Shopify/sarama/describe_groups_request.go create mode 100644 vendor/github.com/Shopify/sarama/describe_groups_response.go create mode 100644 vendor/github.com/Shopify/sarama/dev.yml create mode 100644 vendor/github.com/Shopify/sarama/encoder_decoder.go create mode 100644 vendor/github.com/Shopify/sarama/end_txn_request.go create mode 100644 vendor/github.com/Shopify/sarama/end_txn_response.go create mode 100644 vendor/github.com/Shopify/sarama/errors.go create mode 100644 vendor/github.com/Shopify/sarama/fetch_request.go create mode 100644 vendor/github.com/Shopify/sarama/fetch_response.go create mode 100644 vendor/github.com/Shopify/sarama/find_coordinator_request.go create mode 100644 vendor/github.com/Shopify/sarama/find_coordinator_response.go create mode 100644 vendor/github.com/Shopify/sarama/go.mod create mode 100644 vendor/github.com/Shopify/sarama/go.sum create mode 100644 vendor/github.com/Shopify/sarama/heartbeat_request.go create mode 100644 vendor/github.com/Shopify/sarama/heartbeat_response.go create mode 100644 vendor/github.com/Shopify/sarama/init_producer_id_request.go create mode 100644 vendor/github.com/Shopify/sarama/init_producer_id_response.go create mode 100644 vendor/github.com/Shopify/sarama/join_group_request.go create mode 100644 vendor/github.com/Shopify/sarama/join_group_response.go create mode 100644 vendor/github.com/Shopify/sarama/leave_group_request.go create mode 100644 vendor/github.com/Shopify/sarama/leave_group_response.go create mode 100644 vendor/github.com/Shopify/sarama/length_field.go create mode 100644 vendor/github.com/Shopify/sarama/list_groups_request.go create mode 100644 vendor/github.com/Shopify/sarama/list_groups_response.go create mode 100644 vendor/github.com/Shopify/sarama/message.go create mode 
100644 vendor/github.com/Shopify/sarama/message_set.go create mode 100644 vendor/github.com/Shopify/sarama/metadata_request.go create mode 100644 vendor/github.com/Shopify/sarama/metadata_response.go create mode 100644 vendor/github.com/Shopify/sarama/metrics.go create mode 100644 vendor/github.com/Shopify/sarama/mockbroker.go create mode 100644 vendor/github.com/Shopify/sarama/mockresponses.go create mode 100644 vendor/github.com/Shopify/sarama/offset_commit_request.go create mode 100644 vendor/github.com/Shopify/sarama/offset_commit_response.go create mode 100644 vendor/github.com/Shopify/sarama/offset_fetch_request.go create mode 100644 vendor/github.com/Shopify/sarama/offset_fetch_response.go create mode 100644 vendor/github.com/Shopify/sarama/offset_manager.go create mode 100644 vendor/github.com/Shopify/sarama/offset_request.go create mode 100644 vendor/github.com/Shopify/sarama/offset_response.go create mode 100644 vendor/github.com/Shopify/sarama/packet_decoder.go create mode 100644 vendor/github.com/Shopify/sarama/packet_encoder.go create mode 100644 vendor/github.com/Shopify/sarama/partitioner.go create mode 100644 vendor/github.com/Shopify/sarama/prep_encoder.go create mode 100644 vendor/github.com/Shopify/sarama/produce_request.go create mode 100644 vendor/github.com/Shopify/sarama/produce_response.go create mode 100644 vendor/github.com/Shopify/sarama/produce_set.go create mode 100644 vendor/github.com/Shopify/sarama/real_decoder.go create mode 100644 vendor/github.com/Shopify/sarama/real_encoder.go create mode 100644 vendor/github.com/Shopify/sarama/record.go create mode 100644 vendor/github.com/Shopify/sarama/record_batch.go create mode 100644 vendor/github.com/Shopify/sarama/records.go create mode 100644 
vendor/github.com/Shopify/sarama/request.go create mode 100644 vendor/github.com/Shopify/sarama/response_header.go create mode 100644 vendor/github.com/Shopify/sarama/sarama.go create mode 100644 vendor/github.com/Shopify/sarama/sasl_authenticate_request.go create mode 100644 vendor/github.com/Shopify/sarama/sasl_authenticate_response.go create mode 100644 vendor/github.com/Shopify/sarama/sasl_handshake_request.go create mode 100644 vendor/github.com/Shopify/sarama/sasl_handshake_response.go create mode 100644 vendor/github.com/Shopify/sarama/sync_group_request.go create mode 100644 vendor/github.com/Shopify/sarama/sync_group_response.go create mode 100644 vendor/github.com/Shopify/sarama/sync_producer.go create mode 100644 vendor/github.com/Shopify/sarama/timestamp.go create mode 100644 vendor/github.com/Shopify/sarama/txn_offset_commit_request.go create mode 100644 vendor/github.com/Shopify/sarama/txn_offset_commit_response.go create mode 100644 vendor/github.com/Shopify/sarama/utils.go create mode 100644 vendor/github.com/Shopify/sarama/zstd_cgo.go create mode 100644 vendor/github.com/Shopify/sarama/zstd_fallback.go create mode 100644 vendor/github.com/apache/thrift/LICENSE create mode 100644 vendor/github.com/apache/thrift/NOTICE create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/application_exception.go create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/binary_protocol.go create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/buffered_transport.go create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/client.go create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/compact_protocol.go create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/context.go create mode 100644 
vendor/github.com/apache/thrift/lib/go/thrift/debug_protocol.go create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/deserializer.go create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/exception.go create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/field.go create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/framed_transport.go create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/header_context.go create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/header_protocol.go create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/header_transport.go create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/http_client.go create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/http_transport.go create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/iostream_transport.go create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/json_protocol.go create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/memory_buffer.go create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/messagetype.go create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/multiplexed_protocol.go create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/numeric.go create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/pointerize.go create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/processor_factory.go create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/protocol.go create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/protocol_exception.go create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/protocol_factory.go create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/rich_transport.go create mode 100644 
vendor/github.com/apache/thrift/lib/go/thrift/serializer.go create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/server.go create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/server_socket.go create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/server_transport.go create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/simple_json_protocol.go create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/simple_server.go create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/socket.go create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/ssl_server_socket.go create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/ssl_socket.go create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/transport.go create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/transport_exception.go create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/transport_factory.go create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/type.go create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/zlib_transport.go create mode 100644 vendor/github.com/aws/aws-sdk-go/LICENSE.txt create mode 100644 vendor/github.com/aws/aws-sdk-go/NOTICE.txt create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go create mode 100644 
vendor/github.com/aws/aws-sdk-go/aws/client/client.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/client/logger.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/client/no_op_retryer.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/config.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/convert_types.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go create mode 100644 
vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/doc.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/endpoints/legacy_regions.go create mode 100644 
vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/errors.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/logger.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/request.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/validation.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_5.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_6.go create mode 100644 
vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/doc.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/session.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/types.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/url.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/version.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/ast.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/comma_token.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/comment_token.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/empty_token.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/expression.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/fuzz.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/ini.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/ini_lexer.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go create mode 100644 
vendor/github.com/aws/aws-sdk-go/internal/ini/newline_token.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/number_helper.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/op_tokens.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/parse_error.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/parse_stack.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/sep_tokens.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/walker.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/ws_token.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/sdkio/byte.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor_go1.9.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read_1_5.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go create mode 100644 
vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/host.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/sort.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go create mode 100644 
vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/route53/api.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/route53/customizations.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/route53/doc.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/route53/errors.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/route53/route53iface/interface.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/route53/service.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/route53/unmarshal_error.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/route53/waiters.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sts/api.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sts/doc.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sts/errors.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sts/service.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go create mode 100644 vendor/github.com/beorn7/perks/LICENSE create mode 100644 vendor/github.com/beorn7/perks/quantile/exampledata.txt create mode 100644 vendor/github.com/beorn7/perks/quantile/stream.go create mode 100644 vendor/github.com/caddyserver/caddy/.gitattributes create mode 100644 vendor/github.com/caddyserver/caddy/.gitignore create mode 100644 vendor/github.com/caddyserver/caddy/LICENSE.txt create mode 100644 vendor/github.com/caddyserver/caddy/README.md create mode 100644 vendor/github.com/caddyserver/caddy/assets.go create mode 100644 vendor/github.com/caddyserver/caddy/azure-pipelines.yml create 
mode 100644 vendor/github.com/caddyserver/caddy/caddy.go create mode 100644 vendor/github.com/caddyserver/caddy/caddyfile/dispenser.go create mode 100644 vendor/github.com/caddyserver/caddy/caddyfile/json.go create mode 100644 vendor/github.com/caddyserver/caddy/caddyfile/lexer.go create mode 100644 vendor/github.com/caddyserver/caddy/caddyfile/parse.go create mode 100644 vendor/github.com/caddyserver/caddy/commands.go create mode 100644 vendor/github.com/caddyserver/caddy/controller.go create mode 100644 vendor/github.com/caddyserver/caddy/go.mod create mode 100644 vendor/github.com/caddyserver/caddy/go.sum create mode 100644 vendor/github.com/caddyserver/caddy/onevent/hook/config.go create mode 100644 vendor/github.com/caddyserver/caddy/onevent/hook/hook.go create mode 100644 vendor/github.com/caddyserver/caddy/onevent/on.go create mode 100644 vendor/github.com/caddyserver/caddy/plugins.go create mode 100644 vendor/github.com/caddyserver/caddy/rlimit_nonposix.go create mode 100644 vendor/github.com/caddyserver/caddy/rlimit_posix.go create mode 100644 vendor/github.com/caddyserver/caddy/sigtrap.go create mode 100644 vendor/github.com/caddyserver/caddy/sigtrap_nonposix.go create mode 100644 vendor/github.com/caddyserver/caddy/sigtrap_posix.go create mode 100644 vendor/github.com/caddyserver/caddy/telemetry/collection.go create mode 100644 vendor/github.com/caddyserver/caddy/telemetry/telemetry.go create mode 100644 vendor/github.com/caddyserver/caddy/upgrade.go create mode 100644 vendor/github.com/celebdor/zeroconf/.gitignore create mode 100644 vendor/github.com/celebdor/zeroconf/LICENSE create mode 100644 vendor/github.com/celebdor/zeroconf/README.md create mode 100644 vendor/github.com/celebdor/zeroconf/client.go create mode 100644 
vendor/github.com/celebdor/zeroconf/connection.go create mode 100644 vendor/github.com/celebdor/zeroconf/doc.go create mode 100644 vendor/github.com/celebdor/zeroconf/server.go create mode 100644 vendor/github.com/celebdor/zeroconf/service.go create mode 100644 vendor/github.com/celebdor/zeroconf/utils.go create mode 100644 vendor/github.com/cenkalti/backoff/.gitignore create mode 100644 vendor/github.com/cenkalti/backoff/.travis.yml create mode 100644 vendor/github.com/cenkalti/backoff/LICENSE create mode 100644 vendor/github.com/cenkalti/backoff/README.md create mode 100644 vendor/github.com/cenkalti/backoff/backoff.go create mode 100644 vendor/github.com/cenkalti/backoff/context.go create mode 100644 vendor/github.com/cenkalti/backoff/exponential.go create mode 100644 vendor/github.com/cenkalti/backoff/retry.go create mode 100644 vendor/github.com/cenkalti/backoff/ticker.go create mode 100644 vendor/github.com/cenkalti/backoff/tries.go create mode 100644 vendor/github.com/cespare/xxhash/v2/.travis.yml create mode 100644 vendor/github.com/cespare/xxhash/v2/LICENSE.txt create mode 100644 vendor/github.com/cespare/xxhash/v2/README.md create mode 100644 vendor/github.com/cespare/xxhash/v2/go.mod create mode 100644 vendor/github.com/cespare/xxhash/v2/go.sum create mode 100644 vendor/github.com/cespare/xxhash/v2/xxhash.go create mode 100644 vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go create mode 100644 vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s create mode 100644 vendor/github.com/cespare/xxhash/v2/xxhash_other.go create mode 100644 vendor/github.com/cespare/xxhash/v2/xxhash_safe.go create mode 100644 vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go create mode 100644 vendor/github.com/coredns/federation/OWNERS create mode 100644 
vendor/github.com/coredns/federation/README.md create mode 100644 vendor/github.com/coredns/federation/federation.go create mode 100644 vendor/github.com/coredns/federation/kubernetes_federations.go create mode 100644 vendor/github.com/coredns/federation/parse.go create mode 100644 vendor/github.com/coredns/federation/setup.go create mode 100644 vendor/github.com/coreos/go-systemd/LICENSE create mode 100644 vendor/github.com/coreos/go-systemd/NOTICE create mode 100644 vendor/github.com/coreos/go-systemd/journal/journal.go create mode 100644 vendor/github.com/coreos/pkg/LICENSE create mode 100644 vendor/github.com/coreos/pkg/NOTICE create mode 100644 vendor/github.com/coreos/pkg/capnslog/README.md create mode 100644 vendor/github.com/coreos/pkg/capnslog/formatters.go create mode 100644 vendor/github.com/coreos/pkg/capnslog/glog_formatter.go create mode 100644 vendor/github.com/coreos/pkg/capnslog/init.go create mode 100644 vendor/github.com/coreos/pkg/capnslog/init_windows.go create mode 100644 vendor/github.com/coreos/pkg/capnslog/journald_formatter.go create mode 100644 vendor/github.com/coreos/pkg/capnslog/log_hijack.go create mode 100644 vendor/github.com/coreos/pkg/capnslog/logmap.go create mode 100644 vendor/github.com/coreos/pkg/capnslog/pkg_logger.go create mode 100644 vendor/github.com/coreos/pkg/capnslog/syslog_formatter.go create mode 100644 vendor/github.com/davecgh/go-spew/LICENSE create mode 100644 vendor/github.com/davecgh/go-spew/spew/bypass.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/bypasssafe.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/common.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/config.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/doc.go create mode 100644 
vendor/github.com/davecgh/go-spew/spew/dump.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/format.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/spew.go create mode 100644 vendor/github.com/dgrijalva/jwt-go/.gitignore create mode 100644 vendor/github.com/dgrijalva/jwt-go/.travis.yml create mode 100644 vendor/github.com/dgrijalva/jwt-go/LICENSE create mode 100644 vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md create mode 100644 vendor/github.com/dgrijalva/jwt-go/README.md create mode 100644 vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md create mode 100644 vendor/github.com/dgrijalva/jwt-go/claims.go create mode 100644 vendor/github.com/dgrijalva/jwt-go/doc.go create mode 100644 vendor/github.com/dgrijalva/jwt-go/ecdsa.go create mode 100644 vendor/github.com/dgrijalva/jwt-go/ecdsa_utils.go create mode 100644 vendor/github.com/dgrijalva/jwt-go/errors.go create mode 100644 vendor/github.com/dgrijalva/jwt-go/hmac.go create mode 100644 vendor/github.com/dgrijalva/jwt-go/map_claims.go create mode 100644 vendor/github.com/dgrijalva/jwt-go/none.go create mode 100644 vendor/github.com/dgrijalva/jwt-go/parser.go create mode 100644 vendor/github.com/dgrijalva/jwt-go/rsa.go create mode 100644 vendor/github.com/dgrijalva/jwt-go/rsa_pss.go create mode 100644 vendor/github.com/dgrijalva/jwt-go/rsa_utils.go create mode 100644 vendor/github.com/dgrijalva/jwt-go/signing_method.go create mode 100644 vendor/github.com/dgrijalva/jwt-go/token.go create mode 100644 vendor/github.com/dimchansky/utfbom/.gitignore create mode 100644 vendor/github.com/dimchansky/utfbom/.travis.yml create mode 100644 vendor/github.com/dimchansky/utfbom/LICENSE create mode 100644 vendor/github.com/dimchansky/utfbom/README.md create mode 100644 
vendor/github.com/dimchansky/utfbom/go.mod create mode 100644 vendor/github.com/dimchansky/utfbom/utfbom.go create mode 100644 vendor/github.com/dnstap/golang-dnstap/.gitignore create mode 100644 vendor/github.com/dnstap/golang-dnstap/COPYRIGHT create mode 100644 vendor/github.com/dnstap/golang-dnstap/FrameStreamInput.go create mode 100644 vendor/github.com/dnstap/golang-dnstap/FrameStreamOutput.go create mode 100644 vendor/github.com/dnstap/golang-dnstap/FrameStreamSockInput.go create mode 100644 vendor/github.com/dnstap/golang-dnstap/LICENSE create mode 100644 vendor/github.com/dnstap/golang-dnstap/QuietTextFormat.go create mode 100644 vendor/github.com/dnstap/golang-dnstap/README create mode 100644 vendor/github.com/dnstap/golang-dnstap/TextOutput.go create mode 100644 vendor/github.com/dnstap/golang-dnstap/YamlFormat.go create mode 100644 vendor/github.com/dnstap/golang-dnstap/dnstap.go create mode 100644 vendor/github.com/dnstap/golang-dnstap/dnstap.pb.go create mode 100644 vendor/github.com/eapache/go-resiliency/LICENSE create mode 100644 vendor/github.com/eapache/go-resiliency/breaker/README.md create mode 100644 vendor/github.com/eapache/go-resiliency/breaker/breaker.go create mode 100644 vendor/github.com/eapache/go-xerial-snappy/.gitignore create mode 100644 vendor/github.com/eapache/go-xerial-snappy/.travis.yml create mode 100644 vendor/github.com/eapache/go-xerial-snappy/LICENSE create mode 100644 vendor/github.com/eapache/go-xerial-snappy/README.md create mode 100644 vendor/github.com/eapache/go-xerial-snappy/fuzz.go create mode 100644 vendor/github.com/eapache/go-xerial-snappy/snappy.go create mode 100644 vendor/github.com/eapache/queue/.gitignore create mode 100644 vendor/github.com/eapache/queue/.travis.yml create mode 100644 
vendor/github.com/eapache/queue/LICENSE create mode 100644 vendor/github.com/eapache/queue/README.md create mode 100644 vendor/github.com/eapache/queue/queue.go create mode 100644 vendor/github.com/evanphx/json-patch/.travis.yml create mode 100644 vendor/github.com/evanphx/json-patch/LICENSE create mode 100644 vendor/github.com/evanphx/json-patch/README.md create mode 100644 vendor/github.com/evanphx/json-patch/merge.go create mode 100644 vendor/github.com/evanphx/json-patch/patch.go create mode 100644 vendor/github.com/farsightsec/golang-framestream/.gitignore create mode 100644 vendor/github.com/farsightsec/golang-framestream/COPYRIGHT create mode 100644 vendor/github.com/farsightsec/golang-framestream/Control.go create mode 100644 vendor/github.com/farsightsec/golang-framestream/Decoder.go create mode 100644 vendor/github.com/farsightsec/golang-framestream/Encoder.go create mode 100644 vendor/github.com/farsightsec/golang-framestream/LICENSE create mode 100644 vendor/github.com/farsightsec/golang-framestream/README.md create mode 100644 vendor/github.com/farsightsec/golang-framestream/framestream.go create mode 100644 vendor/github.com/flynn/go-shlex/COPYING create mode 100644 vendor/github.com/flynn/go-shlex/Makefile create mode 100644 vendor/github.com/flynn/go-shlex/README.md create mode 100644 vendor/github.com/flynn/go-shlex/shlex.go create mode 100644 vendor/github.com/go-logfmt/logfmt/.gitignore create mode 100644 vendor/github.com/go-logfmt/logfmt/.travis.yml create mode 100644 vendor/github.com/go-logfmt/logfmt/CHANGELOG.md create mode 100644 vendor/github.com/go-logfmt/logfmt/LICENSE create mode 100644 vendor/github.com/go-logfmt/logfmt/README.md create mode 100644 vendor/github.com/go-logfmt/logfmt/decode.go create mode 100644 
vendor/github.com/go-logfmt/logfmt/doc.go create mode 100644 vendor/github.com/go-logfmt/logfmt/encode.go create mode 100644 vendor/github.com/go-logfmt/logfmt/fuzz.go create mode 100644 vendor/github.com/go-logfmt/logfmt/go.mod create mode 100644 vendor/github.com/go-logfmt/logfmt/go.sum create mode 100644 vendor/github.com/go-logfmt/logfmt/jsonstring.go create mode 100644 vendor/github.com/gogo/protobuf/AUTHORS create mode 100644 vendor/github.com/gogo/protobuf/CONTRIBUTORS create mode 100644 vendor/github.com/gogo/protobuf/LICENSE create mode 100644 vendor/github.com/gogo/protobuf/gogoproto/Makefile create mode 100644 vendor/github.com/gogo/protobuf/gogoproto/doc.go create mode 100644 vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go create mode 100644 vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.golden create mode 100644 vendor/github.com/gogo/protobuf/gogoproto/gogo.proto create mode 100644 vendor/github.com/gogo/protobuf/gogoproto/helper.go create mode 100644 vendor/github.com/gogo/protobuf/proto/Makefile create mode 100644 vendor/github.com/gogo/protobuf/proto/clone.go create mode 100644 vendor/github.com/gogo/protobuf/proto/custom_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/decode.go create mode 100644 vendor/github.com/gogo/protobuf/proto/deprecated.go create mode 100644 vendor/github.com/gogo/protobuf/proto/discard.go create mode 100644 vendor/github.com/gogo/protobuf/proto/duration.go create mode 100644 vendor/github.com/gogo/protobuf/proto/duration_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/encode.go create mode 100644 vendor/github.com/gogo/protobuf/proto/encode_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/equal.go create mode 100644 
vendor/github.com/gogo/protobuf/proto/extensions.go create mode 100644 vendor/github.com/gogo/protobuf/proto/extensions_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/lib.go create mode 100644 vendor/github.com/gogo/protobuf/proto/lib_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/message_set.go create mode 100644 vendor/github.com/gogo/protobuf/proto/pointer_reflect.go create mode 100644 vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go create mode 100644 vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/properties.go create mode 100644 vendor/github.com/gogo/protobuf/proto/properties_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/skip_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/table_marshal.go create mode 100644 vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/table_merge.go create mode 100644 vendor/github.com/gogo/protobuf/proto/table_unmarshal.go create mode 100644 vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/text.go create mode 100644 vendor/github.com/gogo/protobuf/proto/text_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/text_parser.go create mode 100644 vendor/github.com/gogo/protobuf/proto/timestamp.go create mode 100644 vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/wrappers.go create mode 100644 vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go create mode 100644 
vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile create mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go create mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go create mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go create mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go create mode 100644 vendor/github.com/gogo/protobuf/sortkeys/sortkeys.go create mode 100644 vendor/github.com/golang/protobuf/AUTHORS create mode 100644 vendor/github.com/golang/protobuf/CONTRIBUTORS create mode 100644 vendor/github.com/golang/protobuf/LICENSE create mode 100644 vendor/github.com/golang/protobuf/proto/clone.go create mode 100644 vendor/github.com/golang/protobuf/proto/decode.go create mode 100644 vendor/github.com/golang/protobuf/proto/deprecated.go create mode 100644 vendor/github.com/golang/protobuf/proto/discard.go create mode 100644 vendor/github.com/golang/protobuf/proto/encode.go create mode 100644 vendor/github.com/golang/protobuf/proto/equal.go create mode 100644 vendor/github.com/golang/protobuf/proto/extensions.go create mode 100644 vendor/github.com/golang/protobuf/proto/lib.go create mode 100644 vendor/github.com/golang/protobuf/proto/message_set.go create mode 100644 vendor/github.com/golang/protobuf/proto/pointer_reflect.go create mode 100644 vendor/github.com/golang/protobuf/proto/pointer_unsafe.go create mode 100644 vendor/github.com/golang/protobuf/proto/properties.go create mode 100644 vendor/github.com/golang/protobuf/proto/table_marshal.go create mode 100644 vendor/github.com/golang/protobuf/proto/table_merge.go create mode 100644 vendor/github.com/golang/protobuf/proto/table_unmarshal.go create mode 
100644 vendor/github.com/golang/protobuf/proto/text.go create mode 100644 vendor/github.com/golang/protobuf/proto/text_parser.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/any.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/any/any.pb.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/any/any.proto create mode 100644 vendor/github.com/golang/protobuf/ptypes/doc.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/duration.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/duration/duration.proto create mode 100644 vendor/github.com/golang/protobuf/ptypes/timestamp.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto create mode 100644 vendor/github.com/golang/snappy/.gitignore create mode 100644 vendor/github.com/golang/snappy/AUTHORS create mode 100644 vendor/github.com/golang/snappy/CONTRIBUTORS create mode 100644 vendor/github.com/golang/snappy/LICENSE create mode 100644 vendor/github.com/golang/snappy/README create mode 100644 vendor/github.com/golang/snappy/decode.go create mode 100644 vendor/github.com/golang/snappy/decode_amd64.go create mode 100644 vendor/github.com/golang/snappy/decode_amd64.s create mode 100644 vendor/github.com/golang/snappy/decode_other.go create mode 100644 vendor/github.com/golang/snappy/encode.go create mode 100644 vendor/github.com/golang/snappy/encode_amd64.go create mode 100644 vendor/github.com/golang/snappy/encode_amd64.s create mode 100644 vendor/github.com/golang/snappy/encode_other.go create mode 100644 vendor/github.com/golang/snappy/snappy.go create 
mode 100644 vendor/github.com/google/go-cmp/LICENSE create mode 100644 vendor/github.com/google/go-cmp/cmp/compare.go create mode 100644 vendor/github.com/google/go-cmp/cmp/export_panic.go create mode 100644 vendor/github.com/google/go-cmp/cmp/export_unsafe.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/function/func.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/value/sort.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/value/zero.go create mode 100644 vendor/github.com/google/go-cmp/cmp/options.go create mode 100644 vendor/github.com/google/go-cmp/cmp/path.go create mode 100644 vendor/github.com/google/go-cmp/cmp/report.go create mode 100644 vendor/github.com/google/go-cmp/cmp/report_compare.go create mode 100644 vendor/github.com/google/go-cmp/cmp/report_reflect.go create mode 100644 vendor/github.com/google/go-cmp/cmp/report_slices.go create mode 100644 vendor/github.com/google/go-cmp/cmp/report_text.go create mode 100644 vendor/github.com/google/go-cmp/cmp/report_value.go create mode 100644 vendor/github.com/google/gofuzz/.travis.yml create 
mode 100644 vendor/github.com/google/gofuzz/CONTRIBUTING.md create mode 100644 vendor/github.com/google/gofuzz/LICENSE create mode 100644 vendor/github.com/google/gofuzz/README.md create mode 100644 vendor/github.com/google/gofuzz/doc.go create mode 100644 vendor/github.com/google/gofuzz/fuzz.go create mode 100644 vendor/github.com/google/gofuzz/go.mod create mode 100644 vendor/github.com/google/uuid/.travis.yml create mode 100644 vendor/github.com/google/uuid/CONTRIBUTING.md create mode 100644 vendor/github.com/google/uuid/CONTRIBUTORS create mode 100644 vendor/github.com/google/uuid/LICENSE create mode 100644 vendor/github.com/google/uuid/README.md create mode 100644 vendor/github.com/google/uuid/dce.go create mode 100644 vendor/github.com/google/uuid/doc.go create mode 100644 vendor/github.com/google/uuid/go.mod create mode 100644 vendor/github.com/google/uuid/hash.go create mode 100644 vendor/github.com/google/uuid/marshal.go create mode 100644 vendor/github.com/google/uuid/node.go create mode 100644 vendor/github.com/google/uuid/node_js.go create mode 100644 vendor/github.com/google/uuid/node_net.go create mode 100644 vendor/github.com/google/uuid/sql.go create mode 100644 vendor/github.com/google/uuid/time.go create mode 100644 vendor/github.com/google/uuid/util.go create mode 100644 vendor/github.com/google/uuid/uuid.go create mode 100644 vendor/github.com/google/uuid/version1.go create mode 100644 vendor/github.com/google/uuid/version4.go create mode 100644 vendor/github.com/googleapis/gax-go/v2/LICENSE create mode 100644 vendor/github.com/googleapis/gax-go/v2/call_option.go create mode 100644 vendor/github.com/googleapis/gax-go/v2/gax.go create mode 100644 vendor/github.com/googleapis/gax-go/v2/go.mod create mode 100644 
vendor/github.com/googleapis/gax-go/v2/go.sum create mode 100644 vendor/github.com/googleapis/gax-go/v2/header.go create mode 100644 vendor/github.com/googleapis/gax-go/v2/invoke.go create mode 100644 vendor/github.com/googleapis/gnostic/LICENSE create mode 100644 vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.go create mode 100644 vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.pb.go create mode 100644 vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.proto create mode 100644 vendor/github.com/googleapis/gnostic/OpenAPIv2/README.md create mode 100644 vendor/github.com/googleapis/gnostic/OpenAPIv2/openapi-2.0.json create mode 100644 vendor/github.com/googleapis/gnostic/compiler/README.md create mode 100644 vendor/github.com/googleapis/gnostic/compiler/context.go create mode 100644 vendor/github.com/googleapis/gnostic/compiler/error.go create mode 100644 vendor/github.com/googleapis/gnostic/compiler/extension-handler.go create mode 100644 vendor/github.com/googleapis/gnostic/compiler/helpers.go create mode 100644 vendor/github.com/googleapis/gnostic/compiler/main.go create mode 100644 vendor/github.com/googleapis/gnostic/compiler/reader.go create mode 100644 vendor/github.com/googleapis/gnostic/extensions/COMPILE-EXTENSION.sh create mode 100644 vendor/github.com/googleapis/gnostic/extensions/README.md create mode 100644 vendor/github.com/googleapis/gnostic/extensions/extension.pb.go create mode 100644 vendor/github.com/googleapis/gnostic/extensions/extension.proto create mode 100644 vendor/github.com/googleapis/gnostic/extensions/extensions.go create mode 100644 vendor/github.com/gophercloud/gophercloud/.gitignore create mode 100644 vendor/github.com/gophercloud/gophercloud/.travis.yml create mode 100644 
vendor/github.com/gophercloud/gophercloud/.zuul.yaml create mode 100644 vendor/github.com/gophercloud/gophercloud/CHANGELOG.md create mode 100644 vendor/github.com/gophercloud/gophercloud/LICENSE create mode 100644 vendor/github.com/gophercloud/gophercloud/README.md create mode 100644 vendor/github.com/gophercloud/gophercloud/auth_options.go create mode 100644 vendor/github.com/gophercloud/gophercloud/auth_result.go create mode 100644 vendor/github.com/gophercloud/gophercloud/doc.go create mode 100644 vendor/github.com/gophercloud/gophercloud/endpoint_search.go create mode 100644 vendor/github.com/gophercloud/gophercloud/errors.go create mode 100644 vendor/github.com/gophercloud/gophercloud/go.mod create mode 100644 vendor/github.com/gophercloud/gophercloud/go.sum create mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/auth_env.go create mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/client.go create mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/doc.go create mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/endpoint_location.go create mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/errors.go create mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/doc.go create mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/requests.go create mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/results.go create mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/urls.go create mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/doc.go create mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/requests.go create mode 100644 
vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/results.go create mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/urls.go create mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/doc.go create mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/requests.go create mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/results.go create mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/urls.go create mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/utils/base_endpoint.go create mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/utils/choose_version.go create mode 100644 vendor/github.com/gophercloud/gophercloud/pagination/http.go create mode 100644 vendor/github.com/gophercloud/gophercloud/pagination/linked.go create mode 100644 vendor/github.com/gophercloud/gophercloud/pagination/marker.go create mode 100644 vendor/github.com/gophercloud/gophercloud/pagination/pager.go create mode 100644 vendor/github.com/gophercloud/gophercloud/pagination/pkg.go create mode 100644 vendor/github.com/gophercloud/gophercloud/pagination/single.go create mode 100644 vendor/github.com/gophercloud/gophercloud/params.go create mode 100644 vendor/github.com/gophercloud/gophercloud/provider_client.go create mode 100644 vendor/github.com/gophercloud/gophercloud/results.go create mode 100644 vendor/github.com/gophercloud/gophercloud/service_client.go create mode 100644 vendor/github.com/gophercloud/gophercloud/util.go create mode 100644 vendor/github.com/grpc-ecosystem/grpc-opentracing/LICENSE create mode 100644 vendor/github.com/grpc-ecosystem/grpc-opentracing/PATENTS create mode 100644 
vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/README.md create mode 100644 vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/client.go create mode 100644 vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/errors.go create mode 100644 vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/options.go create mode 100644 vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/package.go create mode 100644 vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/server.go create mode 100644 vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/shared.go create mode 100644 vendor/github.com/hashicorp/golang-lru/.gitignore create mode 100644 vendor/github.com/hashicorp/golang-lru/2q.go create mode 100644 vendor/github.com/hashicorp/golang-lru/LICENSE create mode 100644 vendor/github.com/hashicorp/golang-lru/README.md create mode 100644 vendor/github.com/hashicorp/golang-lru/arc.go create mode 100644 vendor/github.com/hashicorp/golang-lru/doc.go create mode 100644 vendor/github.com/hashicorp/golang-lru/go.mod create mode 100644 vendor/github.com/hashicorp/golang-lru/lru.go create mode 100644 vendor/github.com/hashicorp/golang-lru/simplelru/lru.go create mode 100644 vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go create mode 100644 vendor/github.com/imdario/mergo/.gitignore create mode 100644 vendor/github.com/imdario/mergo/.travis.yml create mode 100644 vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md create mode 100644 vendor/github.com/imdario/mergo/LICENSE create mode 100644 vendor/github.com/imdario/mergo/README.md create mode 100644 vendor/github.com/imdario/mergo/doc.go create mode 100644 vendor/github.com/imdario/mergo/map.go create mode 100644 vendor/github.com/imdario/mergo/merge.go create mode 
100644 vendor/github.com/imdario/mergo/mergo.go create mode 100644 vendor/github.com/infobloxopen/go-trees/LICENSE create mode 100644 vendor/github.com/infobloxopen/go-trees/iptree/iptree.go create mode 100644 vendor/github.com/infobloxopen/go-trees/numtree/node32.go create mode 100644 vendor/github.com/infobloxopen/go-trees/numtree/node64.go create mode 100644 vendor/github.com/jmespath/go-jmespath/.gitignore create mode 100644 vendor/github.com/jmespath/go-jmespath/.travis.yml create mode 100644 vendor/github.com/jmespath/go-jmespath/LICENSE create mode 100644 vendor/github.com/jmespath/go-jmespath/Makefile create mode 100644 vendor/github.com/jmespath/go-jmespath/README.md create mode 100644 vendor/github.com/jmespath/go-jmespath/api.go create mode 100644 vendor/github.com/jmespath/go-jmespath/astnodetype_string.go create mode 100644 vendor/github.com/jmespath/go-jmespath/functions.go create mode 100644 vendor/github.com/jmespath/go-jmespath/interpreter.go create mode 100644 vendor/github.com/jmespath/go-jmespath/lexer.go create mode 100644 vendor/github.com/jmespath/go-jmespath/parser.go create mode 100644 vendor/github.com/jmespath/go-jmespath/toktype_string.go create mode 100644 vendor/github.com/jmespath/go-jmespath/util.go create mode 100644 vendor/github.com/json-iterator/go/.codecov.yml create mode 100644 vendor/github.com/json-iterator/go/.gitignore create mode 100644 vendor/github.com/json-iterator/go/.travis.yml create mode 100644 vendor/github.com/json-iterator/go/Gopkg.lock create mode 100644 vendor/github.com/json-iterator/go/Gopkg.toml create mode 100644 vendor/github.com/json-iterator/go/LICENSE create mode 100644 vendor/github.com/json-iterator/go/README.md create mode 100644 vendor/github.com/json-iterator/go/adapter.go create mode 100644 
vendor/github.com/json-iterator/go/any.go create mode 100644 vendor/github.com/json-iterator/go/any_array.go create mode 100644 vendor/github.com/json-iterator/go/any_bool.go create mode 100644 vendor/github.com/json-iterator/go/any_float.go create mode 100644 vendor/github.com/json-iterator/go/any_int32.go create mode 100644 vendor/github.com/json-iterator/go/any_int64.go create mode 100644 vendor/github.com/json-iterator/go/any_invalid.go create mode 100644 vendor/github.com/json-iterator/go/any_nil.go create mode 100644 vendor/github.com/json-iterator/go/any_number.go create mode 100644 vendor/github.com/json-iterator/go/any_object.go create mode 100644 vendor/github.com/json-iterator/go/any_str.go create mode 100644 vendor/github.com/json-iterator/go/any_uint32.go create mode 100644 vendor/github.com/json-iterator/go/any_uint64.go create mode 100644 vendor/github.com/json-iterator/go/build.sh create mode 100644 vendor/github.com/json-iterator/go/config.go create mode 100644 vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md create mode 100644 vendor/github.com/json-iterator/go/go.mod create mode 100644 vendor/github.com/json-iterator/go/go.sum create mode 100644 vendor/github.com/json-iterator/go/iter.go create mode 100644 vendor/github.com/json-iterator/go/iter_array.go create mode 100644 vendor/github.com/json-iterator/go/iter_float.go create mode 100644 vendor/github.com/json-iterator/go/iter_int.go create mode 100644 vendor/github.com/json-iterator/go/iter_object.go create mode 100644 vendor/github.com/json-iterator/go/iter_skip.go create mode 100644 vendor/github.com/json-iterator/go/iter_skip_sloppy.go create mode 100644 vendor/github.com/json-iterator/go/iter_skip_strict.go create mode 100644 
vendor/github.com/json-iterator/go/iter_str.go create mode 100644 vendor/github.com/json-iterator/go/jsoniter.go create mode 100644 vendor/github.com/json-iterator/go/pool.go create mode 100644 vendor/github.com/json-iterator/go/reflect.go create mode 100644 vendor/github.com/json-iterator/go/reflect_array.go create mode 100644 vendor/github.com/json-iterator/go/reflect_dynamic.go create mode 100644 vendor/github.com/json-iterator/go/reflect_extension.go create mode 100644 vendor/github.com/json-iterator/go/reflect_json_number.go create mode 100644 vendor/github.com/json-iterator/go/reflect_json_raw_message.go create mode 100644 vendor/github.com/json-iterator/go/reflect_map.go create mode 100644 vendor/github.com/json-iterator/go/reflect_marshaler.go create mode 100644 vendor/github.com/json-iterator/go/reflect_native.go create mode 100644 vendor/github.com/json-iterator/go/reflect_optional.go create mode 100644 vendor/github.com/json-iterator/go/reflect_slice.go create mode 100644 vendor/github.com/json-iterator/go/reflect_struct_decoder.go create mode 100644 vendor/github.com/json-iterator/go/reflect_struct_encoder.go create mode 100644 vendor/github.com/json-iterator/go/stream.go create mode 100644 vendor/github.com/json-iterator/go/stream_float.go create mode 100644 vendor/github.com/json-iterator/go/stream_int.go create mode 100644 vendor/github.com/json-iterator/go/stream_str.go create mode 100644 vendor/github.com/json-iterator/go/test.sh create mode 100644 vendor/github.com/kr/logfmt/.gitignore create mode 100644 vendor/github.com/kr/logfmt/Readme create mode 100644 vendor/github.com/kr/logfmt/decode.go create mode 100644 vendor/github.com/kr/logfmt/scanner.go create mode 100644 vendor/github.com/kr/logfmt/unquote.go create mode 100644 
vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE create mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE create mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore create mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile create mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go create mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go create mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go create mode 100644 vendor/github.com/miekg/dns/.codecov.yml create mode 100644 vendor/github.com/miekg/dns/.gitignore create mode 100644 vendor/github.com/miekg/dns/.travis.yml create mode 100644 vendor/github.com/miekg/dns/AUTHORS create mode 100644 vendor/github.com/miekg/dns/CODEOWNERS create mode 100644 vendor/github.com/miekg/dns/CONTRIBUTORS create mode 100644 vendor/github.com/miekg/dns/COPYRIGHT create mode 100644 vendor/github.com/miekg/dns/LICENSE create mode 100644 vendor/github.com/miekg/dns/Makefile.fuzz create mode 100644 vendor/github.com/miekg/dns/Makefile.release create mode 100644 vendor/github.com/miekg/dns/README.md create mode 100644 vendor/github.com/miekg/dns/acceptfunc.go create mode 100644 vendor/github.com/miekg/dns/client.go create mode 100644 vendor/github.com/miekg/dns/clientconfig.go create mode 100644 vendor/github.com/miekg/dns/dane.go create mode 100644 vendor/github.com/miekg/dns/defaults.go create mode 100644 vendor/github.com/miekg/dns/dns.go create mode 100644 vendor/github.com/miekg/dns/dnssec.go create mode 100644 vendor/github.com/miekg/dns/dnssec_keygen.go create mode 100644 vendor/github.com/miekg/dns/dnssec_keyscan.go 
create mode 100644 vendor/github.com/miekg/dns/dnssec_privkey.go create mode 100644 vendor/github.com/miekg/dns/doc.go create mode 100644 vendor/github.com/miekg/dns/duplicate.go create mode 100644 vendor/github.com/miekg/dns/edns.go create mode 100644 vendor/github.com/miekg/dns/format.go create mode 100644 vendor/github.com/miekg/dns/fuzz.go create mode 100644 vendor/github.com/miekg/dns/generate.go create mode 100644 vendor/github.com/miekg/dns/go.mod create mode 100644 vendor/github.com/miekg/dns/go.sum create mode 100644 vendor/github.com/miekg/dns/labels.go create mode 100644 vendor/github.com/miekg/dns/listen_go111.go create mode 100644 vendor/github.com/miekg/dns/listen_go_not111.go create mode 100644 vendor/github.com/miekg/dns/msg.go create mode 100644 vendor/github.com/miekg/dns/msg_helpers.go create mode 100644 vendor/github.com/miekg/dns/msg_truncate.go create mode 100644 vendor/github.com/miekg/dns/nsecx.go create mode 100644 vendor/github.com/miekg/dns/privaterr.go create mode 100644 vendor/github.com/miekg/dns/reverse.go create mode 100644 vendor/github.com/miekg/dns/sanitize.go create mode 100644 vendor/github.com/miekg/dns/scan.go create mode 100644 vendor/github.com/miekg/dns/scan_rr.go create mode 100644 vendor/github.com/miekg/dns/serve_mux.go create mode 100644 vendor/github.com/miekg/dns/server.go create mode 100644 vendor/github.com/miekg/dns/sig0.go create mode 100644 vendor/github.com/miekg/dns/singleinflight.go create mode 100644 vendor/github.com/miekg/dns/smimea.go create mode 100644 vendor/github.com/miekg/dns/tlsa.go create mode 100644 vendor/github.com/miekg/dns/tsig.go create mode 100644 vendor/github.com/miekg/dns/types.go create mode 100644 vendor/github.com/miekg/dns/udp.go create mode 100644 
vendor/github.com/miekg/dns/udp_windows.go create mode 100644 vendor/github.com/miekg/dns/update.go create mode 100644 vendor/github.com/miekg/dns/version.go create mode 100644 vendor/github.com/miekg/dns/xfr.go create mode 100644 vendor/github.com/miekg/dns/zduplicate.go create mode 100644 vendor/github.com/miekg/dns/zmsg.go create mode 100644 vendor/github.com/miekg/dns/ztypes.go create mode 100644 vendor/github.com/mitchellh/go-homedir/LICENSE create mode 100644 vendor/github.com/mitchellh/go-homedir/README.md create mode 100644 vendor/github.com/mitchellh/go-homedir/go.mod create mode 100644 vendor/github.com/mitchellh/go-homedir/homedir.go create mode 100644 vendor/github.com/modern-go/concurrent/.gitignore create mode 100644 vendor/github.com/modern-go/concurrent/.travis.yml create mode 100644 vendor/github.com/modern-go/concurrent/LICENSE create mode 100644 vendor/github.com/modern-go/concurrent/README.md create mode 100644 vendor/github.com/modern-go/concurrent/executor.go create mode 100644 vendor/github.com/modern-go/concurrent/go_above_19.go create mode 100644 vendor/github.com/modern-go/concurrent/go_below_19.go create mode 100644 vendor/github.com/modern-go/concurrent/log.go create mode 100644 vendor/github.com/modern-go/concurrent/test.sh create mode 100644 vendor/github.com/modern-go/concurrent/unbounded_executor.go create mode 100644 vendor/github.com/modern-go/reflect2/.gitignore create mode 100644 vendor/github.com/modern-go/reflect2/.travis.yml create mode 100644 vendor/github.com/modern-go/reflect2/Gopkg.lock create mode 100644 vendor/github.com/modern-go/reflect2/Gopkg.toml create mode 100644 vendor/github.com/modern-go/reflect2/LICENSE create mode 100644 vendor/github.com/modern-go/reflect2/README.md create mode 100644 
vendor/github.com/modern-go/reflect2/go_above_17.go create mode 100644 vendor/github.com/modern-go/reflect2/go_above_19.go create mode 100644 vendor/github.com/modern-go/reflect2/go_below_17.go create mode 100644 vendor/github.com/modern-go/reflect2/go_below_19.go create mode 100644 vendor/github.com/modern-go/reflect2/reflect2.go create mode 100644 vendor/github.com/modern-go/reflect2/reflect2_amd64.s create mode 100644 vendor/github.com/modern-go/reflect2/reflect2_kind.go create mode 100644 vendor/github.com/modern-go/reflect2/relfect2_386.s create mode 100644 vendor/github.com/modern-go/reflect2/relfect2_amd64p32.s create mode 100644 vendor/github.com/modern-go/reflect2/relfect2_arm.s create mode 100644 vendor/github.com/modern-go/reflect2/relfect2_arm64.s create mode 100644 vendor/github.com/modern-go/reflect2/relfect2_mips64x.s create mode 100644 vendor/github.com/modern-go/reflect2/relfect2_mipsx.s create mode 100644 vendor/github.com/modern-go/reflect2/relfect2_ppc64x.s create mode 100644 vendor/github.com/modern-go/reflect2/relfect2_s390x.s create mode 100644 vendor/github.com/modern-go/reflect2/safe_field.go create mode 100644 vendor/github.com/modern-go/reflect2/safe_map.go create mode 100644 vendor/github.com/modern-go/reflect2/safe_slice.go create mode 100644 vendor/github.com/modern-go/reflect2/safe_struct.go create mode 100644 vendor/github.com/modern-go/reflect2/safe_type.go create mode 100644 vendor/github.com/modern-go/reflect2/test.sh create mode 100644 vendor/github.com/modern-go/reflect2/type_map.go create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_array.go create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_eface.go create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_field.go create mode 100644 
vendor/github.com/modern-go/reflect2/unsafe_iface.go create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_link.go create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_map.go create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_ptr.go create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_slice.go create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_struct.go create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_type.go create mode 100644 vendor/github.com/openshift/coredns-mdns/.gitignore create mode 100644 vendor/github.com/openshift/coredns-mdns/CONTRIBUTING.md create mode 100644 vendor/github.com/openshift/coredns-mdns/DCO create mode 100644 vendor/github.com/openshift/coredns-mdns/Dockerfile create mode 100644 vendor/github.com/openshift/coredns-mdns/Jenkinsfile create mode 100644 vendor/github.com/openshift/coredns-mdns/LICENSE create mode 100644 vendor/github.com/openshift/coredns-mdns/Makefile create mode 100644 vendor/github.com/openshift/coredns-mdns/OWNERS create mode 100644 vendor/github.com/openshift/coredns-mdns/README.md create mode 100644 vendor/github.com/openshift/coredns-mdns/etcd.service create mode 100644 vendor/github.com/openshift/coredns-mdns/go.mod create mode 100644 vendor/github.com/openshift/coredns-mdns/go.sum create mode 100644 vendor/github.com/openshift/coredns-mdns/mdns.go create mode 100644 vendor/github.com/openshift/coredns-mdns/setup.go create mode 100644 vendor/github.com/opentracing-contrib/go-observer/.gitignore create mode 100644 vendor/github.com/opentracing-contrib/go-observer/LICENSE create mode 100644 vendor/github.com/opentracing-contrib/go-observer/README.md create mode 100644 vendor/github.com/opentracing-contrib/go-observer/observer.go create mode 100644 
vendor/github.com/opentracing/opentracing-go/.gitignore create mode 100644 vendor/github.com/opentracing/opentracing-go/.travis.yml create mode 100644 vendor/github.com/opentracing/opentracing-go/CHANGELOG.md create mode 100644 vendor/github.com/opentracing/opentracing-go/LICENSE create mode 100644 vendor/github.com/opentracing/opentracing-go/Makefile create mode 100644 vendor/github.com/opentracing/opentracing-go/README.md create mode 100644 vendor/github.com/opentracing/opentracing-go/ext/tags.go create mode 100644 vendor/github.com/opentracing/opentracing-go/globaltracer.go create mode 100644 vendor/github.com/opentracing/opentracing-go/gocontext.go create mode 100644 vendor/github.com/opentracing/opentracing-go/log/field.go create mode 100644 vendor/github.com/opentracing/opentracing-go/log/util.go create mode 100644 vendor/github.com/opentracing/opentracing-go/mocktracer/mocklogrecord.go create mode 100644 vendor/github.com/opentracing/opentracing-go/mocktracer/mockspan.go create mode 100644 vendor/github.com/opentracing/opentracing-go/mocktracer/mocktracer.go create mode 100644 vendor/github.com/opentracing/opentracing-go/mocktracer/propagation.go create mode 100644 vendor/github.com/opentracing/opentracing-go/noop.go create mode 100644 vendor/github.com/opentracing/opentracing-go/propagation.go create mode 100644 vendor/github.com/opentracing/opentracing-go/span.go create mode 100644 vendor/github.com/opentracing/opentracing-go/tracer.go create mode 100644 vendor/github.com/openzipkin-contrib/zipkin-go-opentracing/.gitignore create mode 100644 vendor/github.com/openzipkin-contrib/zipkin-go-opentracing/.travis.yml create mode 100644 vendor/github.com/openzipkin-contrib/zipkin-go-opentracing/LICENSE create mode 100644 
vendor/github.com/openzipkin-contrib/zipkin-go-opentracing/Makefile create mode 100644 vendor/github.com/openzipkin-contrib/zipkin-go-opentracing/README.md create mode 100644 vendor/github.com/openzipkin-contrib/zipkin-go-opentracing/collector-http.go create mode 100644 vendor/github.com/openzipkin-contrib/zipkin-go-opentracing/collector-kafka.go create mode 100644 vendor/github.com/openzipkin-contrib/zipkin-go-opentracing/collector-scribe.go create mode 100644 vendor/github.com/openzipkin-contrib/zipkin-go-opentracing/collector.go create mode 100644 vendor/github.com/openzipkin-contrib/zipkin-go-opentracing/context.go create mode 100644 vendor/github.com/openzipkin-contrib/zipkin-go-opentracing/debug.go create mode 100644 vendor/github.com/openzipkin-contrib/zipkin-go-opentracing/event.go create mode 100644 vendor/github.com/openzipkin-contrib/zipkin-go-opentracing/flag/flags.go create mode 100644 vendor/github.com/openzipkin-contrib/zipkin-go-opentracing/log-materializers.go create mode 100644 vendor/github.com/openzipkin-contrib/zipkin-go-opentracing/logger.go create mode 100644 vendor/github.com/openzipkin-contrib/zipkin-go-opentracing/observer.go create mode 100644 vendor/github.com/openzipkin-contrib/zipkin-go-opentracing/propagation.go create mode 100644 vendor/github.com/openzipkin-contrib/zipkin-go-opentracing/propagation_ot.go create mode 100644 vendor/github.com/openzipkin-contrib/zipkin-go-opentracing/raw.go create mode 100644 vendor/github.com/openzipkin-contrib/zipkin-go-opentracing/recorder.go create mode 100644 vendor/github.com/openzipkin-contrib/zipkin-go-opentracing/sample.go create mode 100644 vendor/github.com/openzipkin-contrib/zipkin-go-opentracing/span.go create mode 100644 vendor/github.com/openzipkin-contrib/zipkin-go-opentracing/thrift/gen-go/scribe/GoUnusedProtection__.go create 
mode 100644 vendor/github.com/openzipkin-contrib/zipkin-go-opentracing/thrift/gen-go/scribe/scribe-consts.go create mode 100644 vendor/github.com/openzipkin-contrib/zipkin-go-opentracing/thrift/gen-go/scribe/scribe.go create mode 100644 vendor/github.com/openzipkin-contrib/zipkin-go-opentracing/thrift/gen-go/zipkincore/GoUnusedProtection__.go create mode 100644 vendor/github.com/openzipkin-contrib/zipkin-go-opentracing/thrift/gen-go/zipkincore/zipkinCore-consts.go create mode 100644 vendor/github.com/openzipkin-contrib/zipkin-go-opentracing/thrift/gen-go/zipkincore/zipkinCore.go create mode 100644 vendor/github.com/openzipkin-contrib/zipkin-go-opentracing/tracer.go create mode 100644 vendor/github.com/openzipkin-contrib/zipkin-go-opentracing/types/traceid.go create mode 100644 vendor/github.com/openzipkin-contrib/zipkin-go-opentracing/util.go create mode 100644 vendor/github.com/openzipkin-contrib/zipkin-go-opentracing/wire/carrier.go create mode 100644 vendor/github.com/openzipkin-contrib/zipkin-go-opentracing/wire/gen.go create mode 100644 vendor/github.com/openzipkin-contrib/zipkin-go-opentracing/wire/wire.pb.go create mode 100644 vendor/github.com/openzipkin-contrib/zipkin-go-opentracing/wire/wire.proto create mode 100644 vendor/github.com/openzipkin-contrib/zipkin-go-opentracing/zipkin-endpoint.go create mode 100644 vendor/github.com/openzipkin-contrib/zipkin-go-opentracing/zipkin-recorder.go create mode 100644 vendor/github.com/philhofer/fwd/LICENSE.md create mode 100644 vendor/github.com/philhofer/fwd/README.md create mode 100644 vendor/github.com/philhofer/fwd/reader.go create mode 100644 vendor/github.com/philhofer/fwd/writer.go create mode 100644 vendor/github.com/philhofer/fwd/writer_appengine.go create mode 100644 vendor/github.com/philhofer/fwd/writer_unsafe.go create mode 100644 
vendor/github.com/pierrec/lz4/.gitignore create mode 100644 vendor/github.com/pierrec/lz4/.travis.yml create mode 100644 vendor/github.com/pierrec/lz4/LICENSE create mode 100644 vendor/github.com/pierrec/lz4/README.md create mode 100644 vendor/github.com/pierrec/lz4/block.go create mode 100644 vendor/github.com/pierrec/lz4/debug.go create mode 100644 vendor/github.com/pierrec/lz4/debug_stub.go create mode 100644 vendor/github.com/pierrec/lz4/internal/xxh32/xxh32zero.go create mode 100644 vendor/github.com/pierrec/lz4/lz4.go create mode 100644 vendor/github.com/pierrec/lz4/lz4_go1.10.go create mode 100644 vendor/github.com/pierrec/lz4/lz4_notgo1.10.go create mode 100644 vendor/github.com/pierrec/lz4/reader.go create mode 100644 vendor/github.com/pierrec/lz4/writer.go create mode 100644 vendor/github.com/prometheus/client_golang/LICENSE create mode 100644 vendor/github.com/prometheus/client_golang/NOTICE create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/.gitignore create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/README.md create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/build_info.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/build_info_pre_1.12.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/collector.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/counter.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/desc.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/doc.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/fnv.go create mode 100644 
vendor/github.com/prometheus/client_golang/prometheus/gauge.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/go_collector.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/histogram.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/labels.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/metric.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/observer.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/process_collector.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/registry.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/summary.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/timer.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/untyped.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/value.go create mode 100644 
vendor/github.com/prometheus/client_golang/prometheus/vec.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/wrap.go create mode 100644 vendor/github.com/prometheus/client_model/LICENSE create mode 100644 vendor/github.com/prometheus/client_model/NOTICE create mode 100644 vendor/github.com/prometheus/client_model/go/metrics.pb.go create mode 100644 vendor/github.com/prometheus/common/LICENSE create mode 100644 vendor/github.com/prometheus/common/NOTICE create mode 100644 vendor/github.com/prometheus/common/expfmt/decode.go create mode 100644 vendor/github.com/prometheus/common/expfmt/encode.go create mode 100644 vendor/github.com/prometheus/common/expfmt/expfmt.go create mode 100644 vendor/github.com/prometheus/common/expfmt/fuzz.go create mode 100644 vendor/github.com/prometheus/common/expfmt/text_create.go create mode 100644 vendor/github.com/prometheus/common/expfmt/text_parse.go create mode 100644 vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt create mode 100644 vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go create mode 100644 vendor/github.com/prometheus/common/model/alert.go create mode 100644 vendor/github.com/prometheus/common/model/fingerprinting.go create mode 100644 vendor/github.com/prometheus/common/model/fnv.go create mode 100644 vendor/github.com/prometheus/common/model/labels.go create mode 100644 vendor/github.com/prometheus/common/model/labelset.go create mode 100644 vendor/github.com/prometheus/common/model/metric.go create mode 100644 vendor/github.com/prometheus/common/model/model.go create mode 100644 vendor/github.com/prometheus/common/model/signature.go create mode 100644 vendor/github.com/prometheus/common/model/silence.go create mode 100644 
vendor/github.com/prometheus/common/model/time.go create mode 100644 vendor/github.com/prometheus/common/model/value.go create mode 100644 vendor/github.com/prometheus/procfs/.gitignore create mode 100644 vendor/github.com/prometheus/procfs/.golangci.yml create mode 100644 vendor/github.com/prometheus/procfs/CONTRIBUTING.md create mode 100644 vendor/github.com/prometheus/procfs/LICENSE create mode 100644 vendor/github.com/prometheus/procfs/MAINTAINERS.md create mode 100644 vendor/github.com/prometheus/procfs/Makefile create mode 100644 vendor/github.com/prometheus/procfs/Makefile.common create mode 100644 vendor/github.com/prometheus/procfs/NOTICE create mode 100644 vendor/github.com/prometheus/procfs/README.md create mode 100644 vendor/github.com/prometheus/procfs/arp.go create mode 100644 vendor/github.com/prometheus/procfs/buddyinfo.go create mode 100644 vendor/github.com/prometheus/procfs/cpuinfo.go create mode 100644 vendor/github.com/prometheus/procfs/crypto.go create mode 100644 vendor/github.com/prometheus/procfs/doc.go create mode 100644 vendor/github.com/prometheus/procfs/fixtures.ttar create mode 100644 vendor/github.com/prometheus/procfs/fs.go create mode 100644 vendor/github.com/prometheus/procfs/go.mod create mode 100644 vendor/github.com/prometheus/procfs/go.sum create mode 100644 vendor/github.com/prometheus/procfs/internal/fs/fs.go create mode 100644 vendor/github.com/prometheus/procfs/internal/util/parse.go create mode 100644 vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go create mode 100644 vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go create mode 100644 vendor/github.com/prometheus/procfs/internal/util/valueparser.go create mode 100644 vendor/github.com/prometheus/procfs/ipvs.go create mode 100644 
vendor/github.com/prometheus/procfs/mdstat.go create mode 100644 vendor/github.com/prometheus/procfs/mountinfo.go create mode 100644 vendor/github.com/prometheus/procfs/mountstats.go create mode 100644 vendor/github.com/prometheus/procfs/net_dev.go create mode 100644 vendor/github.com/prometheus/procfs/net_softnet.go create mode 100644 vendor/github.com/prometheus/procfs/net_unix.go create mode 100644 vendor/github.com/prometheus/procfs/proc.go create mode 100644 vendor/github.com/prometheus/procfs/proc_environ.go create mode 100644 vendor/github.com/prometheus/procfs/proc_fdinfo.go create mode 100644 vendor/github.com/prometheus/procfs/proc_io.go create mode 100644 vendor/github.com/prometheus/procfs/proc_limits.go create mode 100644 vendor/github.com/prometheus/procfs/proc_ns.go create mode 100644 vendor/github.com/prometheus/procfs/proc_psi.go create mode 100644 vendor/github.com/prometheus/procfs/proc_stat.go create mode 100644 vendor/github.com/prometheus/procfs/proc_status.go create mode 100644 vendor/github.com/prometheus/procfs/schedstat.go create mode 100644 vendor/github.com/prometheus/procfs/stat.go create mode 100644 vendor/github.com/prometheus/procfs/ttar create mode 100644 vendor/github.com/prometheus/procfs/vm.go create mode 100644 vendor/github.com/prometheus/procfs/xfrm.go create mode 100644 vendor/github.com/prometheus/procfs/zoneinfo.go create mode 100644 vendor/github.com/rcrowley/go-metrics/.gitignore create mode 100644 vendor/github.com/rcrowley/go-metrics/.travis.yml create mode 100644 vendor/github.com/rcrowley/go-metrics/LICENSE create mode 100644 vendor/github.com/rcrowley/go-metrics/README.md create mode 100644 vendor/github.com/rcrowley/go-metrics/counter.go create mode 100644 vendor/github.com/rcrowley/go-metrics/debug.go 
create mode 100644 vendor/github.com/rcrowley/go-metrics/ewma.go create mode 100644 vendor/github.com/rcrowley/go-metrics/gauge.go create mode 100644 vendor/github.com/rcrowley/go-metrics/gauge_float64.go create mode 100644 vendor/github.com/rcrowley/go-metrics/graphite.go create mode 100644 vendor/github.com/rcrowley/go-metrics/healthcheck.go create mode 100644 vendor/github.com/rcrowley/go-metrics/histogram.go create mode 100644 vendor/github.com/rcrowley/go-metrics/json.go create mode 100644 vendor/github.com/rcrowley/go-metrics/log.go create mode 100644 vendor/github.com/rcrowley/go-metrics/memory.md create mode 100644 vendor/github.com/rcrowley/go-metrics/meter.go create mode 100644 vendor/github.com/rcrowley/go-metrics/metrics.go create mode 100644 vendor/github.com/rcrowley/go-metrics/opentsdb.go create mode 100644 vendor/github.com/rcrowley/go-metrics/registry.go create mode 100644 vendor/github.com/rcrowley/go-metrics/runtime.go create mode 100644 vendor/github.com/rcrowley/go-metrics/runtime_cgo.go create mode 100644 vendor/github.com/rcrowley/go-metrics/runtime_gccpufraction.go create mode 100644 vendor/github.com/rcrowley/go-metrics/runtime_no_cgo.go create mode 100644 vendor/github.com/rcrowley/go-metrics/runtime_no_gccpufraction.go create mode 100644 vendor/github.com/rcrowley/go-metrics/sample.go create mode 100644 vendor/github.com/rcrowley/go-metrics/syslog.go create mode 100644 vendor/github.com/rcrowley/go-metrics/timer.go create mode 100644 vendor/github.com/rcrowley/go-metrics/validate.sh create mode 100644 vendor/github.com/rcrowley/go-metrics/writer.go create mode 100644 vendor/github.com/spf13/pflag/.gitignore create mode 100644 vendor/github.com/spf13/pflag/.travis.yml create mode 100644 vendor/github.com/spf13/pflag/LICENSE create mode 
100644 vendor/github.com/spf13/pflag/README.md create mode 100644 vendor/github.com/spf13/pflag/bool.go create mode 100644 vendor/github.com/spf13/pflag/bool_slice.go create mode 100644 vendor/github.com/spf13/pflag/bytes.go create mode 100644 vendor/github.com/spf13/pflag/count.go create mode 100644 vendor/github.com/spf13/pflag/duration.go create mode 100644 vendor/github.com/spf13/pflag/duration_slice.go create mode 100644 vendor/github.com/spf13/pflag/flag.go create mode 100644 vendor/github.com/spf13/pflag/float32.go create mode 100644 vendor/github.com/spf13/pflag/float64.go create mode 100644 vendor/github.com/spf13/pflag/golangflag.go create mode 100644 vendor/github.com/spf13/pflag/int.go create mode 100644 vendor/github.com/spf13/pflag/int16.go create mode 100644 vendor/github.com/spf13/pflag/int32.go create mode 100644 vendor/github.com/spf13/pflag/int64.go create mode 100644 vendor/github.com/spf13/pflag/int8.go create mode 100644 vendor/github.com/spf13/pflag/int_slice.go create mode 100644 vendor/github.com/spf13/pflag/ip.go create mode 100644 vendor/github.com/spf13/pflag/ip_slice.go create mode 100644 vendor/github.com/spf13/pflag/ipmask.go create mode 100644 vendor/github.com/spf13/pflag/ipnet.go create mode 100644 vendor/github.com/spf13/pflag/string.go create mode 100644 vendor/github.com/spf13/pflag/string_array.go create mode 100644 vendor/github.com/spf13/pflag/string_slice.go create mode 100644 vendor/github.com/spf13/pflag/string_to_int.go create mode 100644 vendor/github.com/spf13/pflag/string_to_string.go create mode 100644 vendor/github.com/spf13/pflag/uint.go create mode 100644 vendor/github.com/spf13/pflag/uint16.go create mode 100644 vendor/github.com/spf13/pflag/uint32.go create mode 100644 
vendor/github.com/spf13/pflag/uint64.go create mode 100644 vendor/github.com/spf13/pflag/uint8.go create mode 100644 vendor/github.com/spf13/pflag/uint_slice.go create mode 100644 vendor/github.com/tinylib/msgp/LICENSE create mode 100644 vendor/github.com/tinylib/msgp/msgp/advise_linux.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/advise_other.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/circular.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/defs.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/edit.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/elsize.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/errors.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/extension.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/file.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/file_port.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/integers.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/json.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/json_bytes.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/number.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/purego.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/read.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/read_bytes.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/size.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/unsafe.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/write.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/write_bytes.go create mode 100644 vendor/go.etcd.io/etcd/LICENSE create mode 100644 vendor/go.etcd.io/etcd/NOTICE create mode 100644 vendor/go.etcd.io/etcd/auth/authpb/auth.pb.go create mode 100644 
vendor/go.etcd.io/etcd/auth/authpb/auth.proto create mode 100644 vendor/go.etcd.io/etcd/clientv3/README.md create mode 100644 vendor/go.etcd.io/etcd/clientv3/auth.go create mode 100644 vendor/go.etcd.io/etcd/clientv3/balancer/balancer.go create mode 100644 vendor/go.etcd.io/etcd/clientv3/balancer/connectivity/connectivity.go create mode 100644 vendor/go.etcd.io/etcd/clientv3/balancer/picker/doc.go create mode 100644 vendor/go.etcd.io/etcd/clientv3/balancer/picker/err.go create mode 100644 vendor/go.etcd.io/etcd/clientv3/balancer/picker/picker.go create mode 100644 vendor/go.etcd.io/etcd/clientv3/balancer/picker/roundrobin_balanced.go create mode 100644 vendor/go.etcd.io/etcd/clientv3/balancer/resolver/endpoint/endpoint.go create mode 100644 vendor/go.etcd.io/etcd/clientv3/balancer/utils.go create mode 100644 vendor/go.etcd.io/etcd/clientv3/client.go create mode 100644 vendor/go.etcd.io/etcd/clientv3/cluster.go create mode 100644 vendor/go.etcd.io/etcd/clientv3/compact_op.go create mode 100644 vendor/go.etcd.io/etcd/clientv3/compare.go create mode 100644 vendor/go.etcd.io/etcd/clientv3/config.go create mode 100644 vendor/go.etcd.io/etcd/clientv3/credentials/credentials.go create mode 100644 vendor/go.etcd.io/etcd/clientv3/doc.go create mode 100644 vendor/go.etcd.io/etcd/clientv3/kv.go create mode 100644 vendor/go.etcd.io/etcd/clientv3/lease.go create mode 100644 vendor/go.etcd.io/etcd/clientv3/logger.go create mode 100644 vendor/go.etcd.io/etcd/clientv3/maintenance.go create mode 100644 vendor/go.etcd.io/etcd/clientv3/op.go create mode 100644 vendor/go.etcd.io/etcd/clientv3/options.go create mode 100644 vendor/go.etcd.io/etcd/clientv3/retry.go create mode 100644 vendor/go.etcd.io/etcd/clientv3/retry_interceptor.go create mode 100644 vendor/go.etcd.io/etcd/clientv3/sort.go create mode 100644 vendor/go.etcd.io/etcd/clientv3/txn.go create mode 100644 vendor/go.etcd.io/etcd/clientv3/utils.go create mode 100644 vendor/go.etcd.io/etcd/clientv3/watch.go create mode 100644 
vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/doc.go create mode 100644 vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/error.go create mode 100644 vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/md.go create mode 100644 vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/metadatafields.go create mode 100644 vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/etcdserver.pb.go create mode 100644 vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/etcdserver.proto create mode 100644 vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/raft_internal.pb.go create mode 100644 vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/raft_internal.proto create mode 100644 vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/raft_internal_stringer.go create mode 100644 vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/rpc.pb.go create mode 100644 vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/rpc.proto create mode 100644 vendor/go.etcd.io/etcd/mvcc/mvccpb/kv.pb.go create mode 100644 vendor/go.etcd.io/etcd/mvcc/mvccpb/kv.proto create mode 100644 vendor/go.etcd.io/etcd/pkg/logutil/discard_logger.go create mode 100644 vendor/go.etcd.io/etcd/pkg/logutil/doc.go create mode 100644 vendor/go.etcd.io/etcd/pkg/logutil/log_level.go create mode 100644 vendor/go.etcd.io/etcd/pkg/logutil/logger.go create mode 100644 vendor/go.etcd.io/etcd/pkg/logutil/merge_logger.go create mode 100644 vendor/go.etcd.io/etcd/pkg/logutil/package_logger.go create mode 100644 vendor/go.etcd.io/etcd/pkg/logutil/zap.go create mode 100644 vendor/go.etcd.io/etcd/pkg/logutil/zap_grpc.go create mode 100644 vendor/go.etcd.io/etcd/pkg/logutil/zap_journal.go create mode 100644 vendor/go.etcd.io/etcd/pkg/logutil/zap_raft.go create mode 100644 vendor/go.etcd.io/etcd/pkg/systemd/doc.go create mode 100644 vendor/go.etcd.io/etcd/pkg/systemd/journal.go create mode 100644 vendor/go.etcd.io/etcd/pkg/types/doc.go create mode 100644 vendor/go.etcd.io/etcd/pkg/types/id.go create mode 100644 vendor/go.etcd.io/etcd/pkg/types/set.go create mode 
100644 vendor/go.etcd.io/etcd/pkg/types/slice.go create mode 100644 vendor/go.etcd.io/etcd/pkg/types/urls.go create mode 100644 vendor/go.etcd.io/etcd/pkg/types/urlsmap.go create mode 100644 vendor/go.etcd.io/etcd/raft/OWNERS create mode 100644 vendor/go.etcd.io/etcd/raft/README.md create mode 100644 vendor/go.etcd.io/etcd/raft/bootstrap.go create mode 100644 vendor/go.etcd.io/etcd/raft/confchange/confchange.go create mode 100644 vendor/go.etcd.io/etcd/raft/confchange/restore.go create mode 100644 vendor/go.etcd.io/etcd/raft/design.md create mode 100644 vendor/go.etcd.io/etcd/raft/doc.go create mode 100644 vendor/go.etcd.io/etcd/raft/log.go create mode 100644 vendor/go.etcd.io/etcd/raft/log_unstable.go create mode 100644 vendor/go.etcd.io/etcd/raft/logger.go create mode 100644 vendor/go.etcd.io/etcd/raft/node.go create mode 100644 vendor/go.etcd.io/etcd/raft/quorum/joint.go create mode 100644 vendor/go.etcd.io/etcd/raft/quorum/majority.go create mode 100644 vendor/go.etcd.io/etcd/raft/quorum/quorum.go create mode 100644 vendor/go.etcd.io/etcd/raft/quorum/voteresult_string.go create mode 100644 vendor/go.etcd.io/etcd/raft/raft.go create mode 100644 vendor/go.etcd.io/etcd/raft/raftpb/confchange.go create mode 100644 vendor/go.etcd.io/etcd/raft/raftpb/confstate.go create mode 100644 vendor/go.etcd.io/etcd/raft/raftpb/raft.pb.go create mode 100644 vendor/go.etcd.io/etcd/raft/raftpb/raft.proto create mode 100644 vendor/go.etcd.io/etcd/raft/rawnode.go create mode 100644 vendor/go.etcd.io/etcd/raft/read_only.go create mode 100644 vendor/go.etcd.io/etcd/raft/status.go create mode 100644 vendor/go.etcd.io/etcd/raft/storage.go create mode 100644 vendor/go.etcd.io/etcd/raft/tracker/inflights.go create mode 100644 vendor/go.etcd.io/etcd/raft/tracker/progress.go create mode 100644 vendor/go.etcd.io/etcd/raft/tracker/state.go create mode 100644 vendor/go.etcd.io/etcd/raft/tracker/tracker.go create mode 100644 vendor/go.etcd.io/etcd/raft/util.go create mode 100644 
vendor/go.opencensus.io/.gitignore create mode 100644 vendor/go.opencensus.io/.travis.yml create mode 100644 vendor/go.opencensus.io/AUTHORS create mode 100644 vendor/go.opencensus.io/CONTRIBUTING.md create mode 100644 vendor/go.opencensus.io/Gopkg.lock create mode 100644 vendor/go.opencensus.io/Gopkg.toml create mode 100644 vendor/go.opencensus.io/LICENSE create mode 100644 vendor/go.opencensus.io/Makefile create mode 100644 vendor/go.opencensus.io/README.md create mode 100644 vendor/go.opencensus.io/appveyor.yml create mode 100644 vendor/go.opencensus.io/go.mod create mode 100644 vendor/go.opencensus.io/go.sum create mode 100644 vendor/go.opencensus.io/internal/internal.go create mode 100644 vendor/go.opencensus.io/internal/sanitize.go create mode 100644 vendor/go.opencensus.io/internal/tagencoding/tagencoding.go create mode 100644 vendor/go.opencensus.io/internal/traceinternals.go create mode 100644 vendor/go.opencensus.io/metric/metricdata/doc.go create mode 100644 vendor/go.opencensus.io/metric/metricdata/exemplar.go create mode 100644 vendor/go.opencensus.io/metric/metricdata/label.go create mode 100644 vendor/go.opencensus.io/metric/metricdata/metric.go create mode 100644 vendor/go.opencensus.io/metric/metricdata/point.go create mode 100644 vendor/go.opencensus.io/metric/metricdata/type_string.go create mode 100644 vendor/go.opencensus.io/metric/metricdata/unit.go create mode 100644 vendor/go.opencensus.io/metric/metricproducer/manager.go create mode 100644 vendor/go.opencensus.io/metric/metricproducer/producer.go create mode 100644 vendor/go.opencensus.io/opencensus.go create mode 100644 vendor/go.opencensus.io/plugin/ochttp/client.go create mode 100644 vendor/go.opencensus.io/plugin/ochttp/client_stats.go create mode 100644 vendor/go.opencensus.io/plugin/ochttp/doc.go create mode 100644 vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go create mode 100644 vendor/go.opencensus.io/plugin/ochttp/route.go create mode 100644 
vendor/go.opencensus.io/plugin/ochttp/server.go create mode 100644 vendor/go.opencensus.io/plugin/ochttp/span_annotating_client_trace.go create mode 100644 vendor/go.opencensus.io/plugin/ochttp/stats.go create mode 100644 vendor/go.opencensus.io/plugin/ochttp/trace.go create mode 100644 vendor/go.opencensus.io/plugin/ochttp/wrapped_body.go create mode 100644 vendor/go.opencensus.io/resource/resource.go create mode 100644 vendor/go.opencensus.io/stats/doc.go create mode 100644 vendor/go.opencensus.io/stats/internal/record.go create mode 100644 vendor/go.opencensus.io/stats/measure.go create mode 100644 vendor/go.opencensus.io/stats/measure_float64.go create mode 100644 vendor/go.opencensus.io/stats/measure_int64.go create mode 100644 vendor/go.opencensus.io/stats/record.go create mode 100644 vendor/go.opencensus.io/stats/units.go create mode 100644 vendor/go.opencensus.io/stats/view/aggregation.go create mode 100644 vendor/go.opencensus.io/stats/view/aggregation_data.go create mode 100644 vendor/go.opencensus.io/stats/view/collector.go create mode 100644 vendor/go.opencensus.io/stats/view/doc.go create mode 100644 vendor/go.opencensus.io/stats/view/export.go create mode 100644 vendor/go.opencensus.io/stats/view/view.go create mode 100644 vendor/go.opencensus.io/stats/view/view_to_metric.go create mode 100644 vendor/go.opencensus.io/stats/view/worker.go create mode 100644 vendor/go.opencensus.io/stats/view/worker_commands.go create mode 100644 vendor/go.opencensus.io/tag/context.go create mode 100644 vendor/go.opencensus.io/tag/doc.go create mode 100644 vendor/go.opencensus.io/tag/key.go create mode 100644 vendor/go.opencensus.io/tag/map.go create mode 100644 vendor/go.opencensus.io/tag/map_codec.go create mode 100644 vendor/go.opencensus.io/tag/metadata.go create mode 100644 vendor/go.opencensus.io/tag/profile_19.go create mode 100644 vendor/go.opencensus.io/tag/profile_not19.go create mode 100644 vendor/go.opencensus.io/tag/validate.go create mode 100644 
vendor/go.opencensus.io/trace/basetypes.go create mode 100644 vendor/go.opencensus.io/trace/config.go create mode 100644 vendor/go.opencensus.io/trace/doc.go create mode 100644 vendor/go.opencensus.io/trace/evictedqueue.go create mode 100644 vendor/go.opencensus.io/trace/export.go create mode 100644 vendor/go.opencensus.io/trace/internal/internal.go create mode 100644 vendor/go.opencensus.io/trace/lrumap.go create mode 100644 vendor/go.opencensus.io/trace/propagation/propagation.go create mode 100644 vendor/go.opencensus.io/trace/sampling.go create mode 100644 vendor/go.opencensus.io/trace/spanbucket.go create mode 100644 vendor/go.opencensus.io/trace/spanstore.go create mode 100644 vendor/go.opencensus.io/trace/status_codes.go create mode 100644 vendor/go.opencensus.io/trace/trace.go create mode 100644 vendor/go.opencensus.io/trace/trace_go11.go create mode 100644 vendor/go.opencensus.io/trace/trace_nongo11.go create mode 100644 vendor/go.opencensus.io/trace/tracestate/tracestate.go create mode 100644 vendor/go.uber.org/atomic/.codecov.yml create mode 100644 vendor/go.uber.org/atomic/.gitignore create mode 100644 vendor/go.uber.org/atomic/.travis.yml create mode 100644 vendor/go.uber.org/atomic/LICENSE.txt create mode 100644 vendor/go.uber.org/atomic/Makefile create mode 100644 vendor/go.uber.org/atomic/README.md create mode 100644 vendor/go.uber.org/atomic/atomic.go create mode 100644 vendor/go.uber.org/atomic/glide.lock create mode 100644 vendor/go.uber.org/atomic/glide.yaml create mode 100644 vendor/go.uber.org/atomic/string.go create mode 100644 vendor/go.uber.org/multierr/.codecov.yml create mode 100644 vendor/go.uber.org/multierr/.gitignore create mode 100644 vendor/go.uber.org/multierr/.travis.yml create mode 100644 vendor/go.uber.org/multierr/CHANGELOG.md create mode 100644 vendor/go.uber.org/multierr/LICENSE.txt create mode 100644 vendor/go.uber.org/multierr/Makefile create mode 100644 vendor/go.uber.org/multierr/README.md create mode 100644 
vendor/go.uber.org/multierr/error.go create mode 100644 vendor/go.uber.org/multierr/glide.lock create mode 100644 vendor/go.uber.org/multierr/glide.yaml create mode 100644 vendor/go.uber.org/zap/.codecov.yml create mode 100644 vendor/go.uber.org/zap/.gitignore create mode 100644 vendor/go.uber.org/zap/.readme.tmpl create mode 100644 vendor/go.uber.org/zap/.travis.yml create mode 100644 vendor/go.uber.org/zap/CHANGELOG.md create mode 100644 vendor/go.uber.org/zap/CODE_OF_CONDUCT.md create mode 100644 vendor/go.uber.org/zap/CONTRIBUTING.md create mode 100644 vendor/go.uber.org/zap/FAQ.md create mode 100644 vendor/go.uber.org/zap/LICENSE.txt create mode 100644 vendor/go.uber.org/zap/Makefile create mode 100644 vendor/go.uber.org/zap/README.md create mode 100644 vendor/go.uber.org/zap/array.go create mode 100644 vendor/go.uber.org/zap/buffer/buffer.go create mode 100644 vendor/go.uber.org/zap/buffer/pool.go create mode 100644 vendor/go.uber.org/zap/check_license.sh create mode 100644 vendor/go.uber.org/zap/config.go create mode 100644 vendor/go.uber.org/zap/doc.go create mode 100644 vendor/go.uber.org/zap/encoder.go create mode 100644 vendor/go.uber.org/zap/error.go create mode 100644 vendor/go.uber.org/zap/field.go create mode 100644 vendor/go.uber.org/zap/flag.go create mode 100644 vendor/go.uber.org/zap/glide.lock create mode 100644 vendor/go.uber.org/zap/glide.yaml create mode 100644 vendor/go.uber.org/zap/global.go create mode 100644 vendor/go.uber.org/zap/global_go112.go create mode 100644 vendor/go.uber.org/zap/global_prego112.go create mode 100644 vendor/go.uber.org/zap/http_handler.go create mode 100644 vendor/go.uber.org/zap/internal/bufferpool/bufferpool.go create mode 100644 vendor/go.uber.org/zap/internal/color/color.go create mode 100644 vendor/go.uber.org/zap/internal/exit/exit.go create mode 100644 vendor/go.uber.org/zap/level.go create mode 100644 vendor/go.uber.org/zap/logger.go create mode 100644 vendor/go.uber.org/zap/options.go create mode 100644 
vendor/go.uber.org/zap/sink.go create mode 100644 vendor/go.uber.org/zap/stacktrace.go create mode 100644 vendor/go.uber.org/zap/sugar.go create mode 100644 vendor/go.uber.org/zap/time.go create mode 100644 vendor/go.uber.org/zap/writer.go create mode 100644 vendor/go.uber.org/zap/zapcore/console_encoder.go create mode 100644 vendor/go.uber.org/zap/zapcore/core.go create mode 100644 vendor/go.uber.org/zap/zapcore/doc.go create mode 100644 vendor/go.uber.org/zap/zapcore/encoder.go create mode 100644 vendor/go.uber.org/zap/zapcore/entry.go create mode 100644 vendor/go.uber.org/zap/zapcore/error.go create mode 100644 vendor/go.uber.org/zap/zapcore/field.go create mode 100644 vendor/go.uber.org/zap/zapcore/hook.go create mode 100644 vendor/go.uber.org/zap/zapcore/json_encoder.go create mode 100644 vendor/go.uber.org/zap/zapcore/level.go create mode 100644 vendor/go.uber.org/zap/zapcore/level_strings.go create mode 100644 vendor/go.uber.org/zap/zapcore/marshaler.go create mode 100644 vendor/go.uber.org/zap/zapcore/memory_encoder.go create mode 100644 vendor/go.uber.org/zap/zapcore/sampler.go create mode 100644 vendor/go.uber.org/zap/zapcore/tee.go create mode 100644 vendor/go.uber.org/zap/zapcore/write_syncer.go create mode 100644 vendor/golang.org/x/crypto/AUTHORS create mode 100644 vendor/golang.org/x/crypto/CONTRIBUTORS create mode 100644 vendor/golang.org/x/crypto/LICENSE create mode 100644 vendor/golang.org/x/crypto/PATENTS create mode 100644 vendor/golang.org/x/crypto/ed25519/ed25519.go create mode 100644 vendor/golang.org/x/crypto/ed25519/ed25519_go113.go create mode 100644 vendor/golang.org/x/crypto/ed25519/internal/edwards25519/const.go create mode 100644 vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go create mode 100644 vendor/golang.org/x/crypto/pkcs12/bmp-string.go create mode 100644 vendor/golang.org/x/crypto/pkcs12/crypto.go create mode 100644 vendor/golang.org/x/crypto/pkcs12/errors.go create mode 100644 
vendor/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go create mode 100644 vendor/golang.org/x/crypto/pkcs12/mac.go create mode 100644 vendor/golang.org/x/crypto/pkcs12/pbkdf.go create mode 100644 vendor/golang.org/x/crypto/pkcs12/pkcs12.go create mode 100644 vendor/golang.org/x/crypto/pkcs12/safebags.go create mode 100644 vendor/golang.org/x/crypto/ssh/terminal/terminal.go create mode 100644 vendor/golang.org/x/crypto/ssh/terminal/util.go create mode 100644 vendor/golang.org/x/crypto/ssh/terminal/util_aix.go create mode 100644 vendor/golang.org/x/crypto/ssh/terminal/util_bsd.go create mode 100644 vendor/golang.org/x/crypto/ssh/terminal/util_linux.go create mode 100644 vendor/golang.org/x/crypto/ssh/terminal/util_plan9.go create mode 100644 vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go create mode 100644 vendor/golang.org/x/crypto/ssh/terminal/util_windows.go create mode 100644 vendor/golang.org/x/net/AUTHORS create mode 100644 vendor/golang.org/x/net/CONTRIBUTORS create mode 100644 vendor/golang.org/x/net/LICENSE create mode 100644 vendor/golang.org/x/net/PATENTS create mode 100644 vendor/golang.org/x/net/bpf/asm.go create mode 100644 vendor/golang.org/x/net/bpf/constants.go create mode 100644 vendor/golang.org/x/net/bpf/doc.go create mode 100644 vendor/golang.org/x/net/bpf/instructions.go create mode 100644 vendor/golang.org/x/net/bpf/setter.go create mode 100644 vendor/golang.org/x/net/bpf/vm.go create mode 100644 vendor/golang.org/x/net/bpf/vm_instructions.go create mode 100644 vendor/golang.org/x/net/context/context.go create mode 100644 vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go create mode 100644 vendor/golang.org/x/net/context/go17.go create mode 100644 vendor/golang.org/x/net/context/go19.go create mode 100644 vendor/golang.org/x/net/context/pre_go17.go create mode 100644 vendor/golang.org/x/net/context/pre_go19.go create mode 100644 vendor/golang.org/x/net/http/httpguts/guts.go create mode 100644 
vendor/golang.org/x/net/http/httpguts/httplex.go create mode 100644 vendor/golang.org/x/net/http2/.gitignore create mode 100644 vendor/golang.org/x/net/http2/Dockerfile create mode 100644 vendor/golang.org/x/net/http2/Makefile create mode 100644 vendor/golang.org/x/net/http2/README create mode 100644 vendor/golang.org/x/net/http2/ciphers.go create mode 100644 vendor/golang.org/x/net/http2/client_conn_pool.go create mode 100644 vendor/golang.org/x/net/http2/databuffer.go create mode 100644 vendor/golang.org/x/net/http2/errors.go create mode 100644 vendor/golang.org/x/net/http2/flow.go create mode 100644 vendor/golang.org/x/net/http2/frame.go create mode 100644 vendor/golang.org/x/net/http2/go111.go create mode 100644 vendor/golang.org/x/net/http2/gotrack.go create mode 100644 vendor/golang.org/x/net/http2/headermap.go create mode 100644 vendor/golang.org/x/net/http2/hpack/encode.go create mode 100644 vendor/golang.org/x/net/http2/hpack/hpack.go create mode 100644 vendor/golang.org/x/net/http2/hpack/huffman.go create mode 100644 vendor/golang.org/x/net/http2/hpack/tables.go create mode 100644 vendor/golang.org/x/net/http2/http2.go create mode 100644 vendor/golang.org/x/net/http2/not_go111.go create mode 100644 vendor/golang.org/x/net/http2/pipe.go create mode 100644 vendor/golang.org/x/net/http2/server.go create mode 100644 vendor/golang.org/x/net/http2/transport.go create mode 100644 vendor/golang.org/x/net/http2/write.go create mode 100644 vendor/golang.org/x/net/http2/writesched.go create mode 100644 vendor/golang.org/x/net/http2/writesched_priority.go create mode 100644 vendor/golang.org/x/net/http2/writesched_random.go create mode 100644 vendor/golang.org/x/net/idna/idna10.0.0.go create mode 100644 vendor/golang.org/x/net/idna/idna9.0.0.go create mode 100644 vendor/golang.org/x/net/idna/punycode.go create mode 100644 vendor/golang.org/x/net/idna/tables10.0.0.go create mode 100644 vendor/golang.org/x/net/idna/tables11.0.0.go create mode 100644 
vendor/golang.org/x/net/idna/tables12.00.go create mode 100644 vendor/golang.org/x/net/idna/tables9.0.0.go create mode 100644 vendor/golang.org/x/net/idna/trie.go create mode 100644 vendor/golang.org/x/net/idna/trieval.go create mode 100644 vendor/golang.org/x/net/internal/iana/const.go create mode 100644 vendor/golang.org/x/net/internal/socket/cmsghdr.go create mode 100644 vendor/golang.org/x/net/internal/socket/cmsghdr_bsd.go create mode 100644 vendor/golang.org/x/net/internal/socket/cmsghdr_linux_32bit.go create mode 100644 vendor/golang.org/x/net/internal/socket/cmsghdr_linux_64bit.go create mode 100644 vendor/golang.org/x/net/internal/socket/cmsghdr_solaris_64bit.go create mode 100644 vendor/golang.org/x/net/internal/socket/cmsghdr_stub.go create mode 100644 vendor/golang.org/x/net/internal/socket/empty.s create mode 100644 vendor/golang.org/x/net/internal/socket/error_unix.go create mode 100644 vendor/golang.org/x/net/internal/socket/error_windows.go create mode 100644 vendor/golang.org/x/net/internal/socket/iovec_32bit.go create mode 100644 vendor/golang.org/x/net/internal/socket/iovec_64bit.go create mode 100644 vendor/golang.org/x/net/internal/socket/iovec_solaris_64bit.go create mode 100644 vendor/golang.org/x/net/internal/socket/iovec_stub.go create mode 100644 vendor/golang.org/x/net/internal/socket/mmsghdr_stub.go create mode 100644 vendor/golang.org/x/net/internal/socket/mmsghdr_unix.go create mode 100644 vendor/golang.org/x/net/internal/socket/msghdr_bsd.go create mode 100644 vendor/golang.org/x/net/internal/socket/msghdr_bsdvar.go create mode 100644 vendor/golang.org/x/net/internal/socket/msghdr_linux.go create mode 100644 vendor/golang.org/x/net/internal/socket/msghdr_linux_32bit.go create mode 100644 vendor/golang.org/x/net/internal/socket/msghdr_linux_64bit.go create mode 100644 vendor/golang.org/x/net/internal/socket/msghdr_openbsd.go create mode 100644 vendor/golang.org/x/net/internal/socket/msghdr_solaris_64bit.go create mode 100644 
vendor/golang.org/x/net/internal/socket/msghdr_stub.go create mode 100644 vendor/golang.org/x/net/internal/socket/norace.go create mode 100644 vendor/golang.org/x/net/internal/socket/race.go create mode 100644 vendor/golang.org/x/net/internal/socket/rawconn.go create mode 100644 vendor/golang.org/x/net/internal/socket/rawconn_mmsg.go create mode 100644 vendor/golang.org/x/net/internal/socket/rawconn_msg.go create mode 100644 vendor/golang.org/x/net/internal/socket/rawconn_nommsg.go create mode 100644 vendor/golang.org/x/net/internal/socket/rawconn_nomsg.go create mode 100644 vendor/golang.org/x/net/internal/socket/socket.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_bsd.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_bsdvar.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_const_unix.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_darwin.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_dragonfly.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_go1_11_darwin.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_linkname.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_386.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_386.s create mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_amd64.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_arm.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_arm64.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_mips.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_mips64.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_mips64le.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_mipsle.go create 
mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_ppc64.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_ppc64le.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_riscv64.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_s390x.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_s390x.s create mode 100644 vendor/golang.org/x/net/internal/socket/sys_netbsd.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_posix.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_solaris.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_solaris_amd64.s create mode 100644 vendor/golang.org/x/net/internal/socket/sys_stub.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_unix.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_windows.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_aix_ppc64.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_darwin_386.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_darwin_amd64.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_darwin_arm.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_darwin_arm64.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_dragonfly_amd64.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_freebsd_386.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_freebsd_amd64.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_freebsd_arm.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_freebsd_arm64.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_linux_386.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_linux_amd64.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_linux_arm.go create mode 100644 
vendor/golang.org/x/net/internal/socket/zsys_linux_arm64.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_linux_mips.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_linux_mips64.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_linux_mips64le.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_linux_mipsle.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64le.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_linux_riscv64.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_linux_s390x.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_netbsd_386.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_netbsd_amd64.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_netbsd_arm.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_netbsd_arm64.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_openbsd_386.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_openbsd_amd64.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_openbsd_arm.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_openbsd_arm64.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_solaris_amd64.go create mode 100644 vendor/golang.org/x/net/internal/timeseries/timeseries.go create mode 100644 vendor/golang.org/x/net/ipv4/batch.go create mode 100644 vendor/golang.org/x/net/ipv4/control.go create mode 100644 vendor/golang.org/x/net/ipv4/control_bsd.go create mode 100644 vendor/golang.org/x/net/ipv4/control_pktinfo.go create mode 100644 vendor/golang.org/x/net/ipv4/control_stub.go create mode 100644 vendor/golang.org/x/net/ipv4/control_unix.go create mode 100644 vendor/golang.org/x/net/ipv4/control_windows.go create mode 100644 vendor/golang.org/x/net/ipv4/dgramopt.go 
create mode 100644 vendor/golang.org/x/net/ipv4/doc.go create mode 100644 vendor/golang.org/x/net/ipv4/endpoint.go create mode 100644 vendor/golang.org/x/net/ipv4/genericopt.go create mode 100644 vendor/golang.org/x/net/ipv4/header.go create mode 100644 vendor/golang.org/x/net/ipv4/helper.go create mode 100644 vendor/golang.org/x/net/ipv4/iana.go create mode 100644 vendor/golang.org/x/net/ipv4/icmp.go create mode 100644 vendor/golang.org/x/net/ipv4/icmp_linux.go create mode 100644 vendor/golang.org/x/net/ipv4/icmp_stub.go create mode 100644 vendor/golang.org/x/net/ipv4/packet.go create mode 100644 vendor/golang.org/x/net/ipv4/payload.go create mode 100644 vendor/golang.org/x/net/ipv4/payload_cmsg.go create mode 100644 vendor/golang.org/x/net/ipv4/payload_nocmsg.go create mode 100644 vendor/golang.org/x/net/ipv4/sockopt.go create mode 100644 vendor/golang.org/x/net/ipv4/sockopt_posix.go create mode 100644 vendor/golang.org/x/net/ipv4/sockopt_stub.go create mode 100644 vendor/golang.org/x/net/ipv4/sys_aix.go create mode 100644 vendor/golang.org/x/net/ipv4/sys_asmreq.go create mode 100644 vendor/golang.org/x/net/ipv4/sys_asmreq_stub.go create mode 100644 vendor/golang.org/x/net/ipv4/sys_asmreqn.go create mode 100644 vendor/golang.org/x/net/ipv4/sys_asmreqn_stub.go create mode 100644 vendor/golang.org/x/net/ipv4/sys_bpf.go create mode 100644 vendor/golang.org/x/net/ipv4/sys_bpf_stub.go create mode 100644 vendor/golang.org/x/net/ipv4/sys_bsd.go create mode 100644 vendor/golang.org/x/net/ipv4/sys_darwin.go create mode 100644 vendor/golang.org/x/net/ipv4/sys_dragonfly.go create mode 100644 vendor/golang.org/x/net/ipv4/sys_freebsd.go create mode 100644 vendor/golang.org/x/net/ipv4/sys_linux.go create mode 100644 vendor/golang.org/x/net/ipv4/sys_solaris.go create mode 100644 vendor/golang.org/x/net/ipv4/sys_ssmreq.go create mode 100644 vendor/golang.org/x/net/ipv4/sys_ssmreq_stub.go create mode 100644 vendor/golang.org/x/net/ipv4/sys_stub.go create mode 100644 
vendor/golang.org/x/net/ipv4/sys_windows.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_aix_ppc64.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_darwin.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_dragonfly.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_freebsd_386.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_freebsd_amd64.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_freebsd_arm.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_freebsd_arm64.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_linux_386.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_linux_amd64.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_linux_arm.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_linux_arm64.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_linux_mips.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_linux_mips64.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_linux_mips64le.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_linux_mipsle.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_linux_ppc.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_linux_ppc64.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_linux_ppc64le.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_linux_riscv64.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_linux_s390x.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_netbsd.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_openbsd.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_solaris.go create mode 100644 vendor/golang.org/x/net/ipv6/batch.go create mode 100644 vendor/golang.org/x/net/ipv6/control.go create mode 100644 vendor/golang.org/x/net/ipv6/control_rfc2292_unix.go create mode 100644 vendor/golang.org/x/net/ipv6/control_rfc3542_unix.go create mode 100644 vendor/golang.org/x/net/ipv6/control_stub.go create mode 100644 vendor/golang.org/x/net/ipv6/control_unix.go create mode 100644 
vendor/golang.org/x/net/ipv6/control_windows.go create mode 100644 vendor/golang.org/x/net/ipv6/dgramopt.go create mode 100644 vendor/golang.org/x/net/ipv6/doc.go create mode 100644 vendor/golang.org/x/net/ipv6/endpoint.go create mode 100644 vendor/golang.org/x/net/ipv6/genericopt.go create mode 100644 vendor/golang.org/x/net/ipv6/header.go create mode 100644 vendor/golang.org/x/net/ipv6/helper.go create mode 100644 vendor/golang.org/x/net/ipv6/iana.go create mode 100644 vendor/golang.org/x/net/ipv6/icmp.go create mode 100644 vendor/golang.org/x/net/ipv6/icmp_bsd.go create mode 100644 vendor/golang.org/x/net/ipv6/icmp_linux.go create mode 100644 vendor/golang.org/x/net/ipv6/icmp_solaris.go create mode 100644 vendor/golang.org/x/net/ipv6/icmp_stub.go create mode 100644 vendor/golang.org/x/net/ipv6/icmp_windows.go create mode 100644 vendor/golang.org/x/net/ipv6/payload.go create mode 100644 vendor/golang.org/x/net/ipv6/payload_cmsg.go create mode 100644 vendor/golang.org/x/net/ipv6/payload_nocmsg.go create mode 100644 vendor/golang.org/x/net/ipv6/sockopt.go create mode 100644 vendor/golang.org/x/net/ipv6/sockopt_posix.go create mode 100644 vendor/golang.org/x/net/ipv6/sockopt_stub.go create mode 100644 vendor/golang.org/x/net/ipv6/sys_aix.go create mode 100644 vendor/golang.org/x/net/ipv6/sys_asmreq.go create mode 100644 vendor/golang.org/x/net/ipv6/sys_asmreq_stub.go create mode 100644 vendor/golang.org/x/net/ipv6/sys_bpf.go create mode 100644 vendor/golang.org/x/net/ipv6/sys_bpf_stub.go create mode 100644 vendor/golang.org/x/net/ipv6/sys_bsd.go create mode 100644 vendor/golang.org/x/net/ipv6/sys_darwin.go create mode 100644 vendor/golang.org/x/net/ipv6/sys_freebsd.go create mode 100644 vendor/golang.org/x/net/ipv6/sys_linux.go create mode 100644 vendor/golang.org/x/net/ipv6/sys_solaris.go create mode 100644 vendor/golang.org/x/net/ipv6/sys_ssmreq.go create mode 100644 vendor/golang.org/x/net/ipv6/sys_ssmreq_stub.go create mode 100644 
vendor/golang.org/x/net/ipv6/sys_stub.go create mode 100644 vendor/golang.org/x/net/ipv6/sys_windows.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_aix_ppc64.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_darwin.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_dragonfly.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_freebsd_386.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_freebsd_amd64.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_freebsd_arm.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_freebsd_arm64.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_linux_386.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_linux_amd64.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_linux_arm.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_linux_arm64.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_linux_mips.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_linux_mips64.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_linux_mips64le.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_linux_mipsle.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_linux_ppc.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_linux_ppc64.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_linux_ppc64le.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_linux_riscv64.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_linux_s390x.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_netbsd.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_openbsd.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_solaris.go create mode 100644 vendor/golang.org/x/net/trace/events.go create mode 100644 vendor/golang.org/x/net/trace/histogram.go create mode 100644 vendor/golang.org/x/net/trace/trace.go create mode 100644 vendor/golang.org/x/oauth2/.travis.yml create mode 100644 vendor/golang.org/x/oauth2/AUTHORS create mode 100644 
vendor/golang.org/x/oauth2/CONTRIBUTING.md create mode 100644 vendor/golang.org/x/oauth2/CONTRIBUTORS create mode 100644 vendor/golang.org/x/oauth2/LICENSE create mode 100644 vendor/golang.org/x/oauth2/README.md create mode 100644 vendor/golang.org/x/oauth2/go.mod create mode 100644 vendor/golang.org/x/oauth2/go.sum create mode 100644 vendor/golang.org/x/oauth2/google/appengine.go create mode 100644 vendor/golang.org/x/oauth2/google/appengine_gen1.go create mode 100644 vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go create mode 100644 vendor/golang.org/x/oauth2/google/default.go create mode 100644 vendor/golang.org/x/oauth2/google/doc.go create mode 100644 vendor/golang.org/x/oauth2/google/google.go create mode 100644 vendor/golang.org/x/oauth2/google/jwt.go create mode 100644 vendor/golang.org/x/oauth2/google/sdk.go create mode 100644 vendor/golang.org/x/oauth2/internal/client_appengine.go create mode 100644 vendor/golang.org/x/oauth2/internal/doc.go create mode 100644 vendor/golang.org/x/oauth2/internal/oauth2.go create mode 100644 vendor/golang.org/x/oauth2/internal/token.go create mode 100644 vendor/golang.org/x/oauth2/internal/transport.go create mode 100644 vendor/golang.org/x/oauth2/jws/jws.go create mode 100644 vendor/golang.org/x/oauth2/jwt/jwt.go create mode 100644 vendor/golang.org/x/oauth2/oauth2.go create mode 100644 vendor/golang.org/x/oauth2/token.go create mode 100644 vendor/golang.org/x/oauth2/transport.go create mode 100644 vendor/golang.org/x/sys/AUTHORS create mode 100644 vendor/golang.org/x/sys/CONTRIBUTORS create mode 100644 vendor/golang.org/x/sys/LICENSE create mode 100644 vendor/golang.org/x/sys/PATENTS create mode 100644 vendor/golang.org/x/sys/unix/.gitignore create mode 100644 vendor/golang.org/x/sys/unix/README.md create mode 100644 vendor/golang.org/x/sys/unix/affinity_linux.go create mode 100644 vendor/golang.org/x/sys/unix/aliases.go create mode 100644 vendor/golang.org/x/sys/unix/asm_aix_ppc64.s create mode 100644 
vendor/golang.org/x/sys/unix/asm_darwin_386.s create mode 100644 vendor/golang.org/x/sys/unix/asm_darwin_amd64.s create mode 100644 vendor/golang.org/x/sys/unix/asm_darwin_arm.s create mode 100644 vendor/golang.org/x/sys/unix/asm_darwin_arm64.s create mode 100644 vendor/golang.org/x/sys/unix/asm_dragonfly_amd64.s create mode 100644 vendor/golang.org/x/sys/unix/asm_freebsd_386.s create mode 100644 vendor/golang.org/x/sys/unix/asm_freebsd_amd64.s create mode 100644 vendor/golang.org/x/sys/unix/asm_freebsd_arm.s create mode 100644 vendor/golang.org/x/sys/unix/asm_freebsd_arm64.s create mode 100644 vendor/golang.org/x/sys/unix/asm_linux_386.s create mode 100644 vendor/golang.org/x/sys/unix/asm_linux_amd64.s create mode 100644 vendor/golang.org/x/sys/unix/asm_linux_arm.s create mode 100644 vendor/golang.org/x/sys/unix/asm_linux_arm64.s create mode 100644 vendor/golang.org/x/sys/unix/asm_linux_mips64x.s create mode 100644 vendor/golang.org/x/sys/unix/asm_linux_mipsx.s create mode 100644 vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s create mode 100644 vendor/golang.org/x/sys/unix/asm_linux_riscv64.s create mode 100644 vendor/golang.org/x/sys/unix/asm_linux_s390x.s create mode 100644 vendor/golang.org/x/sys/unix/asm_netbsd_386.s create mode 100644 vendor/golang.org/x/sys/unix/asm_netbsd_amd64.s create mode 100644 vendor/golang.org/x/sys/unix/asm_netbsd_arm.s create mode 100644 vendor/golang.org/x/sys/unix/asm_netbsd_arm64.s create mode 100644 vendor/golang.org/x/sys/unix/asm_openbsd_386.s create mode 100644 vendor/golang.org/x/sys/unix/asm_openbsd_amd64.s create mode 100644 vendor/golang.org/x/sys/unix/asm_openbsd_arm.s create mode 100644 vendor/golang.org/x/sys/unix/asm_openbsd_arm64.s create mode 100644 vendor/golang.org/x/sys/unix/asm_solaris_amd64.s create mode 100644 vendor/golang.org/x/sys/unix/bluetooth_linux.go create mode 100644 vendor/golang.org/x/sys/unix/cap_freebsd.go create mode 100644 vendor/golang.org/x/sys/unix/constants.go create mode 100644 
vendor/golang.org/x/sys/unix/dev_aix_ppc.go create mode 100644 vendor/golang.org/x/sys/unix/dev_aix_ppc64.go create mode 100644 vendor/golang.org/x/sys/unix/dev_darwin.go create mode 100644 vendor/golang.org/x/sys/unix/dev_dragonfly.go create mode 100644 vendor/golang.org/x/sys/unix/dev_freebsd.go create mode 100644 vendor/golang.org/x/sys/unix/dev_linux.go create mode 100644 vendor/golang.org/x/sys/unix/dev_netbsd.go create mode 100644 vendor/golang.org/x/sys/unix/dev_openbsd.go create mode 100644 vendor/golang.org/x/sys/unix/dirent.go create mode 100644 vendor/golang.org/x/sys/unix/endian_big.go create mode 100644 vendor/golang.org/x/sys/unix/endian_little.go create mode 100644 vendor/golang.org/x/sys/unix/env_unix.go create mode 100644 vendor/golang.org/x/sys/unix/errors_freebsd_386.go create mode 100644 vendor/golang.org/x/sys/unix/errors_freebsd_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/errors_freebsd_arm.go create mode 100644 vendor/golang.org/x/sys/unix/fcntl.go create mode 100644 vendor/golang.org/x/sys/unix/fcntl_darwin.go create mode 100644 vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go create mode 100644 vendor/golang.org/x/sys/unix/fdset.go create mode 100644 vendor/golang.org/x/sys/unix/gccgo.go create mode 100644 vendor/golang.org/x/sys/unix/gccgo_c.c create mode 100644 vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/ioctl.go create mode 100644 vendor/golang.org/x/sys/unix/mkall.sh create mode 100644 vendor/golang.org/x/sys/unix/mkerrors.sh create mode 100644 vendor/golang.org/x/sys/unix/pagesize_unix.go create mode 100644 vendor/golang.org/x/sys/unix/pledge_openbsd.go create mode 100644 vendor/golang.org/x/sys/unix/race.go create mode 100644 vendor/golang.org/x/sys/unix/race0.go create mode 100644 vendor/golang.org/x/sys/unix/readdirent_getdents.go create mode 100644 vendor/golang.org/x/sys/unix/readdirent_getdirentries.go create mode 100644 
vendor/golang.org/x/sys/unix/sockcmsg_dragonfly.go create mode 100644 vendor/golang.org/x/sys/unix/sockcmsg_linux.go create mode 100644 vendor/golang.org/x/sys/unix/sockcmsg_unix.go create mode 100644 vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go create mode 100644 vendor/golang.org/x/sys/unix/str.go create mode 100644 vendor/golang.org/x/sys/unix/syscall.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_aix.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_aix_ppc.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_bsd.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin.1_12.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin.1_13.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin_386.1_11.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin_386.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin_amd64.1_11.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin_arm.1_11.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin_arm.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin_arm64.1_11.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_dragonfly.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_freebsd.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_freebsd_386.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go 
create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_386.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_arm.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_gc.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_gc_386.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_gccgo_386.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_gccgo_arm.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_s390x.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_netbsd.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_netbsd_386.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_openbsd.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_openbsd_386.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_solaris.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go create mode 
100644 vendor/golang.org/x/sys/unix/syscall_unix.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_unix_gc.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_unix_gc_ppc64x.go create mode 100644 vendor/golang.org/x/sys/unix/timestruct.go create mode 100644 vendor/golang.org/x/sys/unix/unveil_openbsd.go create mode 100644 vendor/golang.org/x/sys/unix/xattr_bsd.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_darwin_386.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_darwin_arm.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_386.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_arm.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_mips.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go create mode 100644 
vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/zptrace_armnn_linux.go create mode 100644 vendor/golang.org/x/sys/unix/zptrace_linux_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/zptrace_mipsnn_linux.go create mode 100644 vendor/golang.org/x/sys/unix/zptrace_mipsnnle_linux.go create mode 100644 vendor/golang.org/x/sys/unix/zptrace_x86_linux.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_11.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_13.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_13.s create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_386.s create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_11.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.go create mode 100644 
vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.s create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_11.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_13.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_13.s create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.s create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_11.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.s create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_386.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go create mode 100644 
vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go create mode 100644 vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go create mode 100644 vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_darwin_386.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_darwin_arm.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_386.go create mode 100644 
vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_darwin_386.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go create mode 100644 
vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_386.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_arm.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_mips.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go create mode 100644 vendor/golang.org/x/sys/windows/aliases.go create mode 100644 vendor/golang.org/x/sys/windows/dll_windows.go create mode 100644 vendor/golang.org/x/sys/windows/empty.s create mode 100644 vendor/golang.org/x/sys/windows/env_windows.go 
create mode 100644 vendor/golang.org/x/sys/windows/eventlog.go create mode 100644 vendor/golang.org/x/sys/windows/exec_windows.go create mode 100644 vendor/golang.org/x/sys/windows/memory_windows.go create mode 100644 vendor/golang.org/x/sys/windows/mkerrors.bash create mode 100644 vendor/golang.org/x/sys/windows/mkknownfolderids.bash create mode 100644 vendor/golang.org/x/sys/windows/mksyscall.go create mode 100644 vendor/golang.org/x/sys/windows/race.go create mode 100644 vendor/golang.org/x/sys/windows/race0.go create mode 100644 vendor/golang.org/x/sys/windows/security_windows.go create mode 100644 vendor/golang.org/x/sys/windows/service.go create mode 100644 vendor/golang.org/x/sys/windows/str.go create mode 100644 vendor/golang.org/x/sys/windows/syscall.go create mode 100644 vendor/golang.org/x/sys/windows/syscall_windows.go create mode 100644 vendor/golang.org/x/sys/windows/types_windows.go create mode 100644 vendor/golang.org/x/sys/windows/types_windows_386.go create mode 100644 vendor/golang.org/x/sys/windows/types_windows_amd64.go create mode 100644 vendor/golang.org/x/sys/windows/types_windows_arm.go create mode 100644 vendor/golang.org/x/sys/windows/zerrors_windows.go create mode 100644 vendor/golang.org/x/sys/windows/zknownfolderids_windows.go create mode 100644 vendor/golang.org/x/sys/windows/zsyscall_windows.go create mode 100644 vendor/golang.org/x/text/AUTHORS create mode 100644 vendor/golang.org/x/text/CONTRIBUTORS create mode 100644 vendor/golang.org/x/text/LICENSE create mode 100644 vendor/golang.org/x/text/PATENTS create mode 100644 vendor/golang.org/x/text/secure/bidirule/bidirule.go create mode 100644 vendor/golang.org/x/text/secure/bidirule/bidirule10.0.0.go create mode 100644 vendor/golang.org/x/text/secure/bidirule/bidirule9.0.0.go create mode 100644 vendor/golang.org/x/text/transform/transform.go create mode 100644 vendor/golang.org/x/text/unicode/bidi/bidi.go create mode 100644 vendor/golang.org/x/text/unicode/bidi/bracket.go create mode 
100644 vendor/golang.org/x/text/unicode/bidi/core.go create mode 100644 vendor/golang.org/x/text/unicode/bidi/prop.go create mode 100644 vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go create mode 100644 vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go create mode 100644 vendor/golang.org/x/text/unicode/bidi/tables9.0.0.go create mode 100644 vendor/golang.org/x/text/unicode/bidi/trieval.go create mode 100644 vendor/golang.org/x/text/unicode/norm/composition.go create mode 100644 vendor/golang.org/x/text/unicode/norm/forminfo.go create mode 100644 vendor/golang.org/x/text/unicode/norm/input.go create mode 100644 vendor/golang.org/x/text/unicode/norm/iter.go create mode 100644 vendor/golang.org/x/text/unicode/norm/normalize.go create mode 100644 vendor/golang.org/x/text/unicode/norm/readwriter.go create mode 100644 vendor/golang.org/x/text/unicode/norm/tables10.0.0.go create mode 100644 vendor/golang.org/x/text/unicode/norm/tables11.0.0.go create mode 100644 vendor/golang.org/x/text/unicode/norm/tables9.0.0.go create mode 100644 vendor/golang.org/x/text/unicode/norm/transform.go create mode 100644 vendor/golang.org/x/text/unicode/norm/trie.go create mode 100644 vendor/golang.org/x/time/AUTHORS create mode 100644 vendor/golang.org/x/time/CONTRIBUTORS create mode 100644 vendor/golang.org/x/time/LICENSE create mode 100644 vendor/golang.org/x/time/PATENTS create mode 100644 vendor/golang.org/x/time/rate/rate.go create mode 100644 vendor/golang.org/x/xerrors/LICENSE create mode 100644 vendor/golang.org/x/xerrors/PATENTS create mode 100644 vendor/golang.org/x/xerrors/README create mode 100644 vendor/golang.org/x/xerrors/adaptor.go create mode 100644 vendor/golang.org/x/xerrors/codereview.cfg create mode 100644 vendor/golang.org/x/xerrors/doc.go create mode 100644 vendor/golang.org/x/xerrors/errors.go create mode 100644 vendor/golang.org/x/xerrors/fmt.go create mode 100644 vendor/golang.org/x/xerrors/format.go create mode 100644 
vendor/golang.org/x/xerrors/frame.go create mode 100644 vendor/golang.org/x/xerrors/go.mod create mode 100644 vendor/golang.org/x/xerrors/internal/internal.go create mode 100644 vendor/golang.org/x/xerrors/wrap.go create mode 100644 vendor/google.golang.org/api/AUTHORS create mode 100644 vendor/google.golang.org/api/CONTRIBUTORS create mode 100644 vendor/google.golang.org/api/LICENSE create mode 100644 vendor/google.golang.org/api/dns/v1/dns-api.json create mode 100644 vendor/google.golang.org/api/dns/v1/dns-gen.go create mode 100644 vendor/google.golang.org/api/googleapi/googleapi.go create mode 100644 vendor/google.golang.org/api/googleapi/transport/apikey.go create mode 100644 vendor/google.golang.org/api/googleapi/types.go create mode 100644 vendor/google.golang.org/api/internal/creds.go create mode 100644 vendor/google.golang.org/api/internal/gensupport/buffer.go create mode 100644 vendor/google.golang.org/api/internal/gensupport/doc.go create mode 100644 vendor/google.golang.org/api/internal/gensupport/json.go create mode 100644 vendor/google.golang.org/api/internal/gensupport/jsonfloat.go create mode 100644 vendor/google.golang.org/api/internal/gensupport/media.go create mode 100644 vendor/google.golang.org/api/internal/gensupport/params.go create mode 100644 vendor/google.golang.org/api/internal/gensupport/resumable.go create mode 100644 vendor/google.golang.org/api/internal/gensupport/send.go create mode 100644 vendor/google.golang.org/api/internal/pool.go create mode 100644 vendor/google.golang.org/api/internal/service-account.json create mode 100644 vendor/google.golang.org/api/internal/settings.go create mode 100644 vendor/google.golang.org/api/internal/third_party/uritemplates/LICENSE create mode 100644 vendor/google.golang.org/api/internal/third_party/uritemplates/METADATA create mode 100644 vendor/google.golang.org/api/internal/third_party/uritemplates/uritemplates.go create mode 100644 
vendor/google.golang.org/api/internal/third_party/uritemplates/utils.go create mode 100644 vendor/google.golang.org/api/option/credentials_go19.go create mode 100644 vendor/google.golang.org/api/option/credentials_notgo19.go create mode 100644 vendor/google.golang.org/api/option/option.go create mode 100644 vendor/google.golang.org/api/transport/http/dial.go create mode 100644 vendor/google.golang.org/api/transport/http/dial_appengine.go create mode 100644 vendor/google.golang.org/api/transport/http/internal/propagation/http.go create mode 100644 vendor/google.golang.org/appengine/.travis.yml create mode 100644 vendor/google.golang.org/appengine/CONTRIBUTING.md create mode 100644 vendor/google.golang.org/appengine/LICENSE create mode 100644 vendor/google.golang.org/appengine/README.md create mode 100644 vendor/google.golang.org/appengine/appengine.go create mode 100644 vendor/google.golang.org/appengine/appengine_vm.go create mode 100644 vendor/google.golang.org/appengine/errors.go create mode 100644 vendor/google.golang.org/appengine/go.mod create mode 100644 vendor/google.golang.org/appengine/go.sum create mode 100644 vendor/google.golang.org/appengine/identity.go create mode 100644 vendor/google.golang.org/appengine/internal/api.go create mode 100644 vendor/google.golang.org/appengine/internal/api_classic.go create mode 100644 vendor/google.golang.org/appengine/internal/api_common.go create mode 100644 vendor/google.golang.org/appengine/internal/app_id.go create mode 100644 vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go create mode 100644 vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto create mode 100644 vendor/google.golang.org/appengine/internal/base/api_base.pb.go create mode 100644 vendor/google.golang.org/appengine/internal/base/api_base.proto create mode 100644 vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go create mode 100644 
vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto create mode 100644 vendor/google.golang.org/appengine/internal/identity.go create mode 100644 vendor/google.golang.org/appengine/internal/identity_classic.go create mode 100644 vendor/google.golang.org/appengine/internal/identity_flex.go create mode 100644 vendor/google.golang.org/appengine/internal/identity_vm.go create mode 100644 vendor/google.golang.org/appengine/internal/internal.go create mode 100644 vendor/google.golang.org/appengine/internal/log/log_service.pb.go create mode 100644 vendor/google.golang.org/appengine/internal/log/log_service.proto create mode 100644 vendor/google.golang.org/appengine/internal/main.go create mode 100644 vendor/google.golang.org/appengine/internal/main_common.go create mode 100644 vendor/google.golang.org/appengine/internal/main_vm.go create mode 100644 vendor/google.golang.org/appengine/internal/metadata.go create mode 100644 vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go create mode 100644 vendor/google.golang.org/appengine/internal/modules/modules_service.proto create mode 100644 vendor/google.golang.org/appengine/internal/net.go create mode 100644 vendor/google.golang.org/appengine/internal/regen.sh create mode 100644 vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go create mode 100644 vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto create mode 100644 vendor/google.golang.org/appengine/internal/transaction.go create mode 100644 vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go create mode 100644 vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto create mode 100644 vendor/google.golang.org/appengine/namespace.go create mode 100644 vendor/google.golang.org/appengine/timeout.go create mode 100644 vendor/google.golang.org/appengine/travis_install.sh create mode 100644 vendor/google.golang.org/appengine/travis_test.sh create mode 
100644 vendor/google.golang.org/appengine/urlfetch/urlfetch.go create mode 100644 vendor/google.golang.org/genproto/LICENSE create mode 100644 vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go create mode 100644 vendor/google.golang.org/grpc/.travis.yml create mode 100644 vendor/google.golang.org/grpc/AUTHORS create mode 100644 vendor/google.golang.org/grpc/CODE-OF-CONDUCT.md create mode 100644 vendor/google.golang.org/grpc/CONTRIBUTING.md create mode 100644 vendor/google.golang.org/grpc/GOVERNANCE.md create mode 100644 vendor/google.golang.org/grpc/LICENSE create mode 100644 vendor/google.golang.org/grpc/MAINTAINERS.md create mode 100644 vendor/google.golang.org/grpc/Makefile create mode 100644 vendor/google.golang.org/grpc/README.md create mode 100644 vendor/google.golang.org/grpc/backoff.go create mode 100644 vendor/google.golang.org/grpc/backoff/backoff.go create mode 100644 vendor/google.golang.org/grpc/balancer.go create mode 100644 vendor/google.golang.org/grpc/balancer/balancer.go create mode 100644 vendor/google.golang.org/grpc/balancer/base/balancer.go create mode 100644 vendor/google.golang.org/grpc/balancer/base/base.go create mode 100644 vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go create mode 100644 vendor/google.golang.org/grpc/balancer_conn_wrappers.go create mode 100644 vendor/google.golang.org/grpc/balancer_v1_wrapper.go create mode 100644 vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go create mode 100644 vendor/google.golang.org/grpc/call.go create mode 100644 vendor/google.golang.org/grpc/clientconn.go create mode 100644 vendor/google.golang.org/grpc/codec.go create mode 100644 vendor/google.golang.org/grpc/codegen.sh create mode 100644 vendor/google.golang.org/grpc/codes/code_string.go create mode 100644 vendor/google.golang.org/grpc/codes/codes.go create mode 100644 vendor/google.golang.org/grpc/connectivity/connectivity.go create mode 100644 
vendor/google.golang.org/grpc/credentials/credentials.go create mode 100644 vendor/google.golang.org/grpc/credentials/internal/syscallconn.go create mode 100644 vendor/google.golang.org/grpc/credentials/internal/syscallconn_appengine.go create mode 100644 vendor/google.golang.org/grpc/credentials/tls13.go create mode 100644 vendor/google.golang.org/grpc/dialoptions.go create mode 100644 vendor/google.golang.org/grpc/doc.go create mode 100644 vendor/google.golang.org/grpc/encoding/encoding.go create mode 100644 vendor/google.golang.org/grpc/encoding/proto/proto.go create mode 100644 vendor/google.golang.org/grpc/go.mod create mode 100644 vendor/google.golang.org/grpc/go.sum create mode 100644 vendor/google.golang.org/grpc/grpclog/grpclog.go create mode 100644 vendor/google.golang.org/grpc/grpclog/logger.go create mode 100644 vendor/google.golang.org/grpc/grpclog/loggerv2.go create mode 100644 vendor/google.golang.org/grpc/install_gae.sh create mode 100644 vendor/google.golang.org/grpc/interceptor.go create mode 100644 vendor/google.golang.org/grpc/internal/backoff/backoff.go create mode 100644 vendor/google.golang.org/grpc/internal/balancerload/load.go create mode 100644 vendor/google.golang.org/grpc/internal/binarylog/binarylog.go create mode 100644 vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go create mode 100644 vendor/google.golang.org/grpc/internal/binarylog/env_config.go create mode 100644 vendor/google.golang.org/grpc/internal/binarylog/method_logger.go create mode 100644 vendor/google.golang.org/grpc/internal/binarylog/regenerate.sh create mode 100644 vendor/google.golang.org/grpc/internal/binarylog/sink.go create mode 100644 vendor/google.golang.org/grpc/internal/binarylog/util.go create mode 100644 vendor/google.golang.org/grpc/internal/buffer/unbounded.go create mode 100644 vendor/google.golang.org/grpc/internal/channelz/funcs.go create mode 100644 vendor/google.golang.org/grpc/internal/channelz/types.go create mode 100644 
vendor/google.golang.org/grpc/internal/channelz/types_linux.go create mode 100644 vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go create mode 100644 vendor/google.golang.org/grpc/internal/channelz/util_linux.go create mode 100644 vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go create mode 100644 vendor/google.golang.org/grpc/internal/envconfig/envconfig.go create mode 100644 vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go create mode 100644 vendor/google.golang.org/grpc/internal/grpcsync/event.go create mode 100644 vendor/google.golang.org/grpc/internal/internal.go create mode 100644 vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go create mode 100644 vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go create mode 100644 vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go create mode 100644 vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/controlbuf.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/defaults.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/flowcontrol.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/handler_server.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/http2_client.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/http2_server.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/http_util.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/log.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/transport.go create mode 100644 vendor/google.golang.org/grpc/keepalive/keepalive.go create mode 100644 vendor/google.golang.org/grpc/metadata/metadata.go create mode 100644 
vendor/google.golang.org/grpc/naming/dns_resolver.go create mode 100644 vendor/google.golang.org/grpc/naming/naming.go create mode 100644 vendor/google.golang.org/grpc/peer/peer.go create mode 100644 vendor/google.golang.org/grpc/picker_wrapper.go create mode 100644 vendor/google.golang.org/grpc/pickfirst.go create mode 100644 vendor/google.golang.org/grpc/preloader.go create mode 100644 vendor/google.golang.org/grpc/proxy.go create mode 100644 vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go create mode 100644 vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go create mode 100644 vendor/google.golang.org/grpc/resolver/resolver.go create mode 100644 vendor/google.golang.org/grpc/resolver_conn_wrapper.go create mode 100644 vendor/google.golang.org/grpc/rpc_util.go create mode 100644 vendor/google.golang.org/grpc/server.go create mode 100644 vendor/google.golang.org/grpc/service_config.go create mode 100644 vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go create mode 100644 vendor/google.golang.org/grpc/stats/handlers.go create mode 100644 vendor/google.golang.org/grpc/stats/stats.go create mode 100644 vendor/google.golang.org/grpc/status/status.go create mode 100644 vendor/google.golang.org/grpc/stream.go create mode 100644 vendor/google.golang.org/grpc/tap/tap.go create mode 100644 vendor/google.golang.org/grpc/trace.go create mode 100644 vendor/google.golang.org/grpc/version.go create mode 100644 vendor/google.golang.org/grpc/vet.sh create mode 100644 vendor/gopkg.in/DataDog/dd-trace-go.v1/LICENSE create mode 100644 vendor/gopkg.in/DataDog/dd-trace-go.v1/LICENSE-3rdparty.csv create mode 100644 vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/Gopkg.toml create mode 100644 vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ddtrace.go create mode 100644 vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext/app_types.go create mode 100644 vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext/cassandra.go create mode 100644 
vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext/db.go create mode 100644 vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext/peer.go create mode 100644 vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext/priority.go create mode 100644 vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext/system.go create mode 100644 vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext/tags.go create mode 100644 vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/internal/globaltracer.go create mode 100644 vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/opentracer/option.go create mode 100644 vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/opentracer/span.go create mode 100644 vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/opentracer/tracer.go create mode 100644 vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/context.go create mode 100644 vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/doc.go create mode 100644 vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/metrics.go create mode 100644 vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/option.go create mode 100644 vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/payload.go create mode 100644 vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/propagator.go create mode 100644 vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/rand.go create mode 100644 vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/sampler.go create mode 100644 vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/span.go create mode 100644 vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/span_msgp.go create mode 100644 vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/spancontext.go create mode 100644 vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/textmap.go create mode 100644 vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/time.go create mode 100644 vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/time_windows.go create mode 100644 vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/tracer.go create mode 
100644 vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/tracer_go11.go create mode 100644 vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/tracer_nongo11.go create mode 100644 vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/transport.go create mode 100644 vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/util.go create mode 100644 vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/globalconfig/globalconfig.go create mode 100644 vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/log/log.go create mode 100644 vendor/gopkg.in/DataDog/dd-trace-go.v1/internal/version/version.go create mode 100644 vendor/gopkg.in/inf.v0/LICENSE create mode 100644 vendor/gopkg.in/inf.v0/dec.go create mode 100644 vendor/gopkg.in/inf.v0/rounder.go create mode 100644 vendor/gopkg.in/yaml.v2/.travis.yml create mode 100644 vendor/gopkg.in/yaml.v2/LICENSE create mode 100644 vendor/gopkg.in/yaml.v2/LICENSE.libyaml create mode 100644 vendor/gopkg.in/yaml.v2/NOTICE create mode 100644 vendor/gopkg.in/yaml.v2/README.md create mode 100644 vendor/gopkg.in/yaml.v2/apic.go create mode 100644 vendor/gopkg.in/yaml.v2/decode.go create mode 100644 vendor/gopkg.in/yaml.v2/emitterc.go create mode 100644 vendor/gopkg.in/yaml.v2/encode.go create mode 100644 vendor/gopkg.in/yaml.v2/go.mod create mode 100644 vendor/gopkg.in/yaml.v2/parserc.go create mode 100644 vendor/gopkg.in/yaml.v2/readerc.go create mode 100644 vendor/gopkg.in/yaml.v2/resolve.go create mode 100644 vendor/gopkg.in/yaml.v2/scannerc.go create mode 100644 vendor/gopkg.in/yaml.v2/sorter.go create mode 100644 vendor/gopkg.in/yaml.v2/writerc.go create mode 100644 vendor/gopkg.in/yaml.v2/yaml.go create mode 100644 vendor/gopkg.in/yaml.v2/yamlh.go create mode 100644 vendor/gopkg.in/yaml.v2/yamlprivateh.go create mode 100644 vendor/k8s.io/api/LICENSE create mode 100644 vendor/k8s.io/api/admissionregistration/v1beta1/doc.go create mode 100644 vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go create mode 100644 
vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto create mode 100644 vendor/k8s.io/api/admissionregistration/v1beta1/register.go create mode 100644 vendor/k8s.io/api/admissionregistration/v1beta1/types.go create mode 100644 vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/apps/v1/doc.go create mode 100644 vendor/k8s.io/api/apps/v1/generated.pb.go create mode 100644 vendor/k8s.io/api/apps/v1/generated.proto create mode 100644 vendor/k8s.io/api/apps/v1/register.go create mode 100644 vendor/k8s.io/api/apps/v1/types.go create mode 100644 vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/apps/v1beta1/doc.go create mode 100644 vendor/k8s.io/api/apps/v1beta1/generated.pb.go create mode 100644 vendor/k8s.io/api/apps/v1beta1/generated.proto create mode 100644 vendor/k8s.io/api/apps/v1beta1/register.go create mode 100644 vendor/k8s.io/api/apps/v1beta1/types.go create mode 100644 vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/apps/v1beta2/doc.go create mode 100644 vendor/k8s.io/api/apps/v1beta2/generated.pb.go create mode 100644 vendor/k8s.io/api/apps/v1beta2/generated.proto create mode 100644 vendor/k8s.io/api/apps/v1beta2/register.go create mode 100644 vendor/k8s.io/api/apps/v1beta2/types.go create mode 100644 vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/auditregistration/v1alpha1/doc.go create mode 100644 vendor/k8s.io/api/auditregistration/v1alpha1/generated.pb.go create mode 100644 
vendor/k8s.io/api/auditregistration/v1alpha1/generated.proto create mode 100644 vendor/k8s.io/api/auditregistration/v1alpha1/register.go create mode 100644 vendor/k8s.io/api/auditregistration/v1alpha1/types.go create mode 100644 vendor/k8s.io/api/auditregistration/v1alpha1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/auditregistration/v1alpha1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/authentication/v1/doc.go create mode 100644 vendor/k8s.io/api/authentication/v1/generated.pb.go create mode 100644 vendor/k8s.io/api/authentication/v1/generated.proto create mode 100644 vendor/k8s.io/api/authentication/v1/register.go create mode 100644 vendor/k8s.io/api/authentication/v1/types.go create mode 100644 vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/authentication/v1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/authentication/v1beta1/doc.go create mode 100644 vendor/k8s.io/api/authentication/v1beta1/generated.pb.go create mode 100644 vendor/k8s.io/api/authentication/v1beta1/generated.proto create mode 100644 vendor/k8s.io/api/authentication/v1beta1/register.go create mode 100644 vendor/k8s.io/api/authentication/v1beta1/types.go create mode 100644 vendor/k8s.io/api/authentication/v1beta1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/authorization/v1/doc.go create mode 100644 vendor/k8s.io/api/authorization/v1/generated.pb.go create mode 100644 vendor/k8s.io/api/authorization/v1/generated.proto create mode 100644 vendor/k8s.io/api/authorization/v1/register.go create mode 100644 vendor/k8s.io/api/authorization/v1/types.go create mode 100644 vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/authorization/v1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/authorization/v1beta1/doc.go create 
mode 100644 vendor/k8s.io/api/authorization/v1beta1/generated.pb.go create mode 100644 vendor/k8s.io/api/authorization/v1beta1/generated.proto create mode 100644 vendor/k8s.io/api/authorization/v1beta1/register.go create mode 100644 vendor/k8s.io/api/authorization/v1beta1/types.go create mode 100644 vendor/k8s.io/api/authorization/v1beta1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/authorization/v1beta1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/autoscaling/v1/doc.go create mode 100644 vendor/k8s.io/api/autoscaling/v1/generated.pb.go create mode 100644 vendor/k8s.io/api/autoscaling/v1/generated.proto create mode 100644 vendor/k8s.io/api/autoscaling/v1/register.go create mode 100644 vendor/k8s.io/api/autoscaling/v1/types.go create mode 100644 vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/autoscaling/v1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/autoscaling/v2beta1/doc.go create mode 100644 vendor/k8s.io/api/autoscaling/v2beta1/generated.pb.go create mode 100644 vendor/k8s.io/api/autoscaling/v2beta1/generated.proto create mode 100644 vendor/k8s.io/api/autoscaling/v2beta1/register.go create mode 100644 vendor/k8s.io/api/autoscaling/v2beta1/types.go create mode 100644 vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/autoscaling/v2beta2/doc.go create mode 100644 vendor/k8s.io/api/autoscaling/v2beta2/generated.pb.go create mode 100644 vendor/k8s.io/api/autoscaling/v2beta2/generated.proto create mode 100644 vendor/k8s.io/api/autoscaling/v2beta2/register.go create mode 100644 vendor/k8s.io/api/autoscaling/v2beta2/types.go create mode 100644 vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/autoscaling/v2beta2/zz_generated.deepcopy.go create mode 100644 
vendor/k8s.io/api/batch/v1/doc.go create mode 100644 vendor/k8s.io/api/batch/v1/generated.pb.go create mode 100644 vendor/k8s.io/api/batch/v1/generated.proto create mode 100644 vendor/k8s.io/api/batch/v1/register.go create mode 100644 vendor/k8s.io/api/batch/v1/types.go create mode 100644 vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/batch/v1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/batch/v1beta1/doc.go create mode 100644 vendor/k8s.io/api/batch/v1beta1/generated.pb.go create mode 100644 vendor/k8s.io/api/batch/v1beta1/generated.proto create mode 100644 vendor/k8s.io/api/batch/v1beta1/register.go create mode 100644 vendor/k8s.io/api/batch/v1beta1/types.go create mode 100644 vendor/k8s.io/api/batch/v1beta1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/batch/v2alpha1/doc.go create mode 100644 vendor/k8s.io/api/batch/v2alpha1/generated.pb.go create mode 100644 vendor/k8s.io/api/batch/v2alpha1/generated.proto create mode 100644 vendor/k8s.io/api/batch/v2alpha1/register.go create mode 100644 vendor/k8s.io/api/batch/v2alpha1/types.go create mode 100644 vendor/k8s.io/api/batch/v2alpha1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/batch/v2alpha1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/certificates/v1beta1/doc.go create mode 100644 vendor/k8s.io/api/certificates/v1beta1/generated.pb.go create mode 100644 vendor/k8s.io/api/certificates/v1beta1/generated.proto create mode 100644 vendor/k8s.io/api/certificates/v1beta1/register.go create mode 100644 vendor/k8s.io/api/certificates/v1beta1/types.go create mode 100644 vendor/k8s.io/api/certificates/v1beta1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/coordination/v1/doc.go create mode 100644 
vendor/k8s.io/api/coordination/v1/generated.pb.go create mode 100644 vendor/k8s.io/api/coordination/v1/generated.proto create mode 100644 vendor/k8s.io/api/coordination/v1/register.go create mode 100644 vendor/k8s.io/api/coordination/v1/types.go create mode 100644 vendor/k8s.io/api/coordination/v1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/coordination/v1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/coordination/v1beta1/doc.go create mode 100644 vendor/k8s.io/api/coordination/v1beta1/generated.pb.go create mode 100644 vendor/k8s.io/api/coordination/v1beta1/generated.proto create mode 100644 vendor/k8s.io/api/coordination/v1beta1/register.go create mode 100644 vendor/k8s.io/api/coordination/v1beta1/types.go create mode 100644 vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/coordination/v1beta1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/core/v1/annotation_key_constants.go create mode 100644 vendor/k8s.io/api/core/v1/doc.go create mode 100644 vendor/k8s.io/api/core/v1/generated.pb.go create mode 100644 vendor/k8s.io/api/core/v1/generated.proto create mode 100644 vendor/k8s.io/api/core/v1/objectreference.go create mode 100644 vendor/k8s.io/api/core/v1/register.go create mode 100644 vendor/k8s.io/api/core/v1/resource.go create mode 100644 vendor/k8s.io/api/core/v1/taint.go create mode 100644 vendor/k8s.io/api/core/v1/toleration.go create mode 100644 vendor/k8s.io/api/core/v1/types.go create mode 100644 vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/core/v1/well_known_labels.go create mode 100644 vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/events/v1beta1/doc.go create mode 100644 vendor/k8s.io/api/events/v1beta1/generated.pb.go create mode 100644 vendor/k8s.io/api/events/v1beta1/generated.proto create mode 100644 vendor/k8s.io/api/events/v1beta1/register.go create 
mode 100644 vendor/k8s.io/api/events/v1beta1/types.go create mode 100644 vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/events/v1beta1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/extensions/v1beta1/doc.go create mode 100644 vendor/k8s.io/api/extensions/v1beta1/generated.pb.go create mode 100644 vendor/k8s.io/api/extensions/v1beta1/generated.proto create mode 100644 vendor/k8s.io/api/extensions/v1beta1/register.go create mode 100644 vendor/k8s.io/api/extensions/v1beta1/types.go create mode 100644 vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/networking/v1/doc.go create mode 100644 vendor/k8s.io/api/networking/v1/generated.pb.go create mode 100644 vendor/k8s.io/api/networking/v1/generated.proto create mode 100644 vendor/k8s.io/api/networking/v1/register.go create mode 100644 vendor/k8s.io/api/networking/v1/types.go create mode 100644 vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/networking/v1beta1/doc.go create mode 100644 vendor/k8s.io/api/networking/v1beta1/generated.pb.go create mode 100644 vendor/k8s.io/api/networking/v1beta1/generated.proto create mode 100644 vendor/k8s.io/api/networking/v1beta1/register.go create mode 100644 vendor/k8s.io/api/networking/v1beta1/types.go create mode 100644 vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/networking/v1beta1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/node/v1alpha1/doc.go create mode 100644 vendor/k8s.io/api/node/v1alpha1/generated.pb.go create mode 100644 vendor/k8s.io/api/node/v1alpha1/generated.proto create mode 100644 vendor/k8s.io/api/node/v1alpha1/register.go create mode 100644 
vendor/k8s.io/api/node/v1alpha1/types.go create mode 100644 vendor/k8s.io/api/node/v1alpha1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/node/v1alpha1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/node/v1beta1/doc.go create mode 100644 vendor/k8s.io/api/node/v1beta1/generated.pb.go create mode 100644 vendor/k8s.io/api/node/v1beta1/generated.proto create mode 100644 vendor/k8s.io/api/node/v1beta1/register.go create mode 100644 vendor/k8s.io/api/node/v1beta1/types.go create mode 100644 vendor/k8s.io/api/node/v1beta1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/node/v1beta1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/policy/v1beta1/doc.go create mode 100644 vendor/k8s.io/api/policy/v1beta1/generated.pb.go create mode 100644 vendor/k8s.io/api/policy/v1beta1/generated.proto create mode 100644 vendor/k8s.io/api/policy/v1beta1/register.go create mode 100644 vendor/k8s.io/api/policy/v1beta1/types.go create mode 100644 vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/rbac/v1/doc.go create mode 100644 vendor/k8s.io/api/rbac/v1/generated.pb.go create mode 100644 vendor/k8s.io/api/rbac/v1/generated.proto create mode 100644 vendor/k8s.io/api/rbac/v1/register.go create mode 100644 vendor/k8s.io/api/rbac/v1/types.go create mode 100644 vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/rbac/v1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/rbac/v1alpha1/doc.go create mode 100644 vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go create mode 100644 vendor/k8s.io/api/rbac/v1alpha1/generated.proto create mode 100644 vendor/k8s.io/api/rbac/v1alpha1/register.go create mode 100644 vendor/k8s.io/api/rbac/v1alpha1/types.go create mode 100644 vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go create mode 100644 
vendor/k8s.io/api/rbac/v1alpha1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/rbac/v1beta1/doc.go create mode 100644 vendor/k8s.io/api/rbac/v1beta1/generated.pb.go create mode 100644 vendor/k8s.io/api/rbac/v1beta1/generated.proto create mode 100644 vendor/k8s.io/api/rbac/v1beta1/register.go create mode 100644 vendor/k8s.io/api/rbac/v1beta1/types.go create mode 100644 vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/rbac/v1beta1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/scheduling/v1/doc.go create mode 100644 vendor/k8s.io/api/scheduling/v1/generated.pb.go create mode 100644 vendor/k8s.io/api/scheduling/v1/generated.proto create mode 100644 vendor/k8s.io/api/scheduling/v1/register.go create mode 100644 vendor/k8s.io/api/scheduling/v1/types.go create mode 100644 vendor/k8s.io/api/scheduling/v1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/scheduling/v1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/scheduling/v1alpha1/doc.go create mode 100644 vendor/k8s.io/api/scheduling/v1alpha1/generated.pb.go create mode 100644 vendor/k8s.io/api/scheduling/v1alpha1/generated.proto create mode 100644 vendor/k8s.io/api/scheduling/v1alpha1/register.go create mode 100644 vendor/k8s.io/api/scheduling/v1alpha1/types.go create mode 100644 vendor/k8s.io/api/scheduling/v1alpha1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/scheduling/v1alpha1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/scheduling/v1beta1/doc.go create mode 100644 vendor/k8s.io/api/scheduling/v1beta1/generated.pb.go create mode 100644 vendor/k8s.io/api/scheduling/v1beta1/generated.proto create mode 100644 vendor/k8s.io/api/scheduling/v1beta1/register.go create mode 100644 vendor/k8s.io/api/scheduling/v1beta1/types.go create mode 100644 vendor/k8s.io/api/scheduling/v1beta1/types_swagger_doc_generated.go create mode 100644 
vendor/k8s.io/api/scheduling/v1beta1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/settings/v1alpha1/doc.go create mode 100644 vendor/k8s.io/api/settings/v1alpha1/generated.pb.go create mode 100644 vendor/k8s.io/api/settings/v1alpha1/generated.proto create mode 100644 vendor/k8s.io/api/settings/v1alpha1/register.go create mode 100644 vendor/k8s.io/api/settings/v1alpha1/types.go create mode 100644 vendor/k8s.io/api/settings/v1alpha1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/settings/v1alpha1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/storage/v1/doc.go create mode 100644 vendor/k8s.io/api/storage/v1/generated.pb.go create mode 100644 vendor/k8s.io/api/storage/v1/generated.proto create mode 100644 vendor/k8s.io/api/storage/v1/register.go create mode 100644 vendor/k8s.io/api/storage/v1/types.go create mode 100644 vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/storage/v1alpha1/doc.go create mode 100644 vendor/k8s.io/api/storage/v1alpha1/generated.pb.go create mode 100644 vendor/k8s.io/api/storage/v1alpha1/generated.proto create mode 100644 vendor/k8s.io/api/storage/v1alpha1/register.go create mode 100644 vendor/k8s.io/api/storage/v1alpha1/types.go create mode 100644 vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/storage/v1alpha1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/storage/v1beta1/doc.go create mode 100644 vendor/k8s.io/api/storage/v1beta1/generated.pb.go create mode 100644 vendor/k8s.io/api/storage/v1beta1/generated.proto create mode 100644 vendor/k8s.io/api/storage/v1beta1/register.go create mode 100644 vendor/k8s.io/api/storage/v1beta1/types.go create mode 100644 vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go 
create mode 100644 vendor/k8s.io/apimachinery/LICENSE create mode 100644 vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS create mode 100644 vendor/k8s.io/apimachinery/pkg/api/errors/doc.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/errors/errors.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS create mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/doc.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/errors.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/firsthit_restmapper.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/help.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/interfaces.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/lazy.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/meta.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/multirestmapper.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/priority.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/restmapper.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS create mode 100644 vendor/k8s.io/apimachinery/pkg/api/resource/amount.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto create mode 100644 vendor/k8s.io/apimachinery/pkg/api/resource/math.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/resource/quantity_proto.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/resource/scale_int.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/resource/suffix.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/resource/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/conversion.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/doc.go create mode 100644 
vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/register.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/types.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.conversion.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/deepcopy.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/duration.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/group_version.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/labels.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_proto.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_proto.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go create 
mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/watch.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.defaults.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/conversion.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/deepcopy.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/doc.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/register.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.defaults.go create mode 100644 vendor/k8s.io/apimachinery/pkg/conversion/converter.go create mode 100644 vendor/k8s.io/apimachinery/pkg/conversion/deep_equal.go create mode 100644 vendor/k8s.io/apimachinery/pkg/conversion/doc.go create mode 100644 vendor/k8s.io/apimachinery/pkg/conversion/helper.go create mode 100644 vendor/k8s.io/apimachinery/pkg/conversion/queryparams/convert.go create mode 100644 vendor/k8s.io/apimachinery/pkg/conversion/queryparams/doc.go create mode 100644 vendor/k8s.io/apimachinery/pkg/fields/doc.go create mode 100644 vendor/k8s.io/apimachinery/pkg/fields/fields.go create mode 100644 vendor/k8s.io/apimachinery/pkg/fields/requirements.go create mode 100644 
vendor/k8s.io/apimachinery/pkg/fields/selector.go create mode 100644 vendor/k8s.io/apimachinery/pkg/labels/doc.go create mode 100644 vendor/k8s.io/apimachinery/pkg/labels/labels.go create mode 100644 vendor/k8s.io/apimachinery/pkg/labels/selector.go create mode 100644 vendor/k8s.io/apimachinery/pkg/labels/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/codec.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/codec_check.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/conversion.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/converter.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/doc.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/embedded.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/error.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/extension.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/generated.proto create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/helper.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/mapper.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/register.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/schema/interfaces.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/scheme.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/scheme_builder.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go create mode 100644 
vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/meta.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/serializer/negotiated_codec.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/doc.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/serializer/recognizer/recognizer.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/swagger_doc_generator.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/types.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/types_proto.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/apimachinery/pkg/selection/operator.go create mode 100644 vendor/k8s.io/apimachinery/pkg/types/doc.go create mode 100644 vendor/k8s.io/apimachinery/pkg/types/namespacedname.go create mode 100644 vendor/k8s.io/apimachinery/pkg/types/nodename.go create mode 100644 vendor/k8s.io/apimachinery/pkg/types/patch.go create mode 100644 vendor/k8s.io/apimachinery/pkg/types/uid.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/cache/cache.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/cache/lruexpirecache.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/clock/clock.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/diff/diff.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/errors/doc.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/errors/errors.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/framer/framer.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto create mode 100644 
vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/json/json.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/mergepatch/OWNERS create mode 100644 vendor/k8s.io/apimachinery/pkg/util/mergepatch/errors.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/mergepatch/util.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/naming/from_stack.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/net/http.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/net/interface.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/net/port_range.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/net/port_split.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/net/util.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/sets/byte.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/sets/doc.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/sets/empty.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/sets/int.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/sets/int32.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/sets/int64.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/sets/string.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/strategicpatch/OWNERS create mode 100644 vendor/k8s.io/apimachinery/pkg/util/strategicpatch/errors.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/strategicpatch/meta.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/strategicpatch/types.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/validation/field/path.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/validation/validation.go create mode 100644 
vendor/k8s.io/apimachinery/pkg/util/wait/doc.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/wait/wait.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go create mode 100644 vendor/k8s.io/apimachinery/pkg/version/doc.go create mode 100644 vendor/k8s.io/apimachinery/pkg/version/helpers.go create mode 100644 vendor/k8s.io/apimachinery/pkg/version/types.go create mode 100644 vendor/k8s.io/apimachinery/pkg/watch/doc.go create mode 100644 vendor/k8s.io/apimachinery/pkg/watch/filter.go create mode 100644 vendor/k8s.io/apimachinery/pkg/watch/mux.go create mode 100644 vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go create mode 100644 vendor/k8s.io/apimachinery/pkg/watch/watch.go create mode 100644 vendor/k8s.io/apimachinery/pkg/watch/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/apimachinery/third_party/forked/golang/json/OWNERS create mode 100644 vendor/k8s.io/apimachinery/third_party/forked/golang/json/fields.go create mode 100644 vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/deep_equal.go create mode 100644 vendor/k8s.io/client-go/LICENSE create mode 100644 vendor/k8s.io/client-go/discovery/discovery_client.go create mode 100644 vendor/k8s.io/client-go/discovery/doc.go create mode 100644 vendor/k8s.io/client-go/discovery/fake/discovery.go create mode 100644 vendor/k8s.io/client-go/discovery/helper.go create mode 100644 vendor/k8s.io/client-go/kubernetes/clientset.go create mode 100644 vendor/k8s.io/client-go/kubernetes/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/fake/clientset_generated.go create mode 100644 vendor/k8s.io/client-go/kubernetes/fake/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/fake/register.go create mode 100644 vendor/k8s.io/client-go/kubernetes/import.go create mode 100644 vendor/k8s.io/client-go/kubernetes/scheme/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/scheme/register.go create mode 100644 
vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/admissionregistration_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_admissionregistration_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_mutatingwebhookconfiguration.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingwebhookconfiguration.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1/apps_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_apps_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_controllerrevision.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_daemonset.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_deployment.go create mode 100644 
vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_replicaset.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_statefulset.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/apps_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/controllerrevision.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_apps_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_controllerrevision.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_deployment.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_statefulset.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/apps_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/controllerrevision.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/daemonset.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/deployment.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/doc.go create mode 100644 
vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_apps_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_controllerrevision.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_daemonset.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_deployment.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_replicaset.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_statefulset.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/replicaset.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/auditregistration_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/auditsink.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/fake/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/fake/fake_auditregistration_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/fake/fake_auditsink.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_authentication_client.go create mode 
100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_tokenreview.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_tokenreview_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_authentication_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_tokenreview.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_tokenreview_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/authorization_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_authorization_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_localsubjectaccessreview.go create mode 100644 
vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_localsubjectaccessreview_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectaccessreview.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectaccessreview_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectrulesreview.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectrulesreview_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_subjectaccessreview.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_subjectaccessreview_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/authorization_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/doc.go create mode 100644 
vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_authorization_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_localsubjectaccessreview.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_localsubjectaccessreview_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectaccessreview.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectaccessreview_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectrulesreview.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectrulesreview_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_subjectaccessreview.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_subjectaccessreview_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview.go create mode 100644 
vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/autoscaling_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_autoscaling_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_horizontalpodautoscaler.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/autoscaling_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/fake_autoscaling_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/fake_horizontalpodautoscaler.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/horizontalpodautoscaler.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/autoscaling_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/doc.go create mode 100644 
vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/fake/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/fake/fake_autoscaling_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/fake/fake_horizontalpodautoscaler.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/horizontalpodautoscaler.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v1/batch_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_batch_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_job.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v1/job.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/batch_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/cronjob.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/fake_batch_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/fake_cronjob.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/batch_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/cronjob.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/doc.go 
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake/fake_batch_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake/fake_cronjob.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificates_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificates_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificatesigningrequest.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificatesigningrequest_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/coordination_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/fake/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/fake/fake_coordination_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/fake/fake_lease.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/generated_expansion.go create mode 100644 
vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/lease.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/coordination_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake/fake_coordination_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake/fake_lease.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/lease.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/core_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/event.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/event_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_componentstatus.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_configmap.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_core_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_endpoints.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event_expansion.go create mode 100644 
vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_limitrange.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_namespace.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_namespace_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolume.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolumeclaim.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_podtemplate.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_replicationcontroller.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_resourcequota.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_secret.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_service.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_service_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_serviceaccount.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_serviceaccount_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace_expansion.go create mode 100644 
vendor/k8s.io/client-go/kubernetes/typed/core/v1/node.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/node_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/secret.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/service.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/service_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/events_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_event.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_event_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_events_client.go create mode 100644 
vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_daemonset.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_deployment.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_deployment_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_extensions_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_ingress.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_networkpolicy.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_podsecuritypolicy.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_replicaset.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/networkpolicy.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go create mode 100644 
vendor/k8s.io/client-go/kubernetes/typed/networking/v1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_networking_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_networkpolicy.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networking_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_ingress.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_networking_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingress.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/networking_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/fake/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/fake/fake_node_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/fake/fake_runtimeclass.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/node_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/runtimeclass.go create mode 100644 
vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/fake/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/fake/fake_node_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/fake/fake_runtimeclass.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/node_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/runtimeclass.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_eviction.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_eviction_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_poddisruptionbudget.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_podsecuritypolicy.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_policy_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/podsecuritypolicy.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/policy_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrole.go create mode 100644 
vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrolebinding.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrole.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrolebinding.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_rbac_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_role.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_rolebinding.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rbac_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/role.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rolebinding.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrole.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrolebinding.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rbac_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_role.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rolebinding.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/generated_expansion.go create mode 100644 
vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rbac_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrole.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrolebinding.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rbac_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_role.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rolebinding.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rbac_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/fake/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/fake/fake_priorityclass.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/fake/fake_scheduling_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/priorityclass.go create 
mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/scheduling_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_priorityclass.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_scheduling_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/priorityclass.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/scheduling_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake/fake_priorityclass.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake/fake_scheduling_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/priorityclass.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/scheduling_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/fake_podpreset.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/fake_settings_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/generated_expansion.go create mode 100644 
vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/podpreset.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/settings_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storage_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storageclass.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_volumeattachment.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1/volumeattachment.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_storage_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_volumeattachment.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csidriver.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csinode.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/doc.go create mode 100644 
vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csidriver.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_csinode.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storage_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storageclass.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_volumeattachment.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattachment.go create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/OWNERS create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/doc.go create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/register.go create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/types.go create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/doc.go create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/register.go create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/types.go create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.conversion.go create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.defaults.go create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/conversion.go create mode 100644 
vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/doc.go create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/register.go create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/types.go create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.conversion.go create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.defaults.go create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/client-go/pkg/version/.gitattributes create mode 100644 vendor/k8s.io/client-go/pkg/version/base.go create mode 100644 vendor/k8s.io/client-go/pkg/version/def.bzl create mode 100644 vendor/k8s.io/client-go/pkg/version/doc.go create mode 100644 vendor/k8s.io/client-go/pkg/version/version.go create mode 100644 vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go create mode 100644 vendor/k8s.io/client-go/plugin/pkg/client/auth/gcp/OWNERS create mode 100644 vendor/k8s.io/client-go/plugin/pkg/client/auth/gcp/gcp.go create mode 100644 vendor/k8s.io/client-go/plugin/pkg/client/auth/oidc/OWNERS create mode 100644 vendor/k8s.io/client-go/plugin/pkg/client/auth/oidc/oidc.go create mode 100644 vendor/k8s.io/client-go/plugin/pkg/client/auth/openstack/openstack.go create mode 100644 vendor/k8s.io/client-go/rest/OWNERS create mode 100644 vendor/k8s.io/client-go/rest/client.go create mode 100644 vendor/k8s.io/client-go/rest/config.go create mode 100644 vendor/k8s.io/client-go/rest/plugin.go create mode 100644 vendor/k8s.io/client-go/rest/request.go create mode 100644 vendor/k8s.io/client-go/rest/transport.go create mode 100644 vendor/k8s.io/client-go/rest/url_utils.go create mode 100644 vendor/k8s.io/client-go/rest/urlbackoff.go create mode 100644 
vendor/k8s.io/client-go/rest/watch/decoder.go create mode 100644 vendor/k8s.io/client-go/rest/watch/encoder.go create mode 100644 vendor/k8s.io/client-go/rest/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/client-go/testing/actions.go create mode 100644 vendor/k8s.io/client-go/testing/fake.go create mode 100644 vendor/k8s.io/client-go/testing/fixture.go create mode 100644 vendor/k8s.io/client-go/third_party/forked/golang/template/exec.go create mode 100644 vendor/k8s.io/client-go/third_party/forked/golang/template/funcs.go create mode 100644 vendor/k8s.io/client-go/tools/auth/OWNERS create mode 100644 vendor/k8s.io/client-go/tools/auth/clientauth.go create mode 100644 vendor/k8s.io/client-go/tools/cache/OWNERS create mode 100644 vendor/k8s.io/client-go/tools/cache/controller.go create mode 100644 vendor/k8s.io/client-go/tools/cache/delta_fifo.go create mode 100644 vendor/k8s.io/client-go/tools/cache/doc.go create mode 100644 vendor/k8s.io/client-go/tools/cache/expiration_cache.go create mode 100644 vendor/k8s.io/client-go/tools/cache/expiration_cache_fakes.go create mode 100644 vendor/k8s.io/client-go/tools/cache/fake_custom_store.go create mode 100644 vendor/k8s.io/client-go/tools/cache/fifo.go create mode 100644 vendor/k8s.io/client-go/tools/cache/heap.go create mode 100644 vendor/k8s.io/client-go/tools/cache/index.go create mode 100644 vendor/k8s.io/client-go/tools/cache/listers.go create mode 100644 vendor/k8s.io/client-go/tools/cache/listwatch.go create mode 100644 vendor/k8s.io/client-go/tools/cache/mutation_cache.go create mode 100644 vendor/k8s.io/client-go/tools/cache/mutation_detector.go create mode 100644 vendor/k8s.io/client-go/tools/cache/reflector.go create mode 100644 vendor/k8s.io/client-go/tools/cache/reflector_metrics.go create mode 100644 vendor/k8s.io/client-go/tools/cache/shared_informer.go create mode 100644 vendor/k8s.io/client-go/tools/cache/store.go create mode 100644 vendor/k8s.io/client-go/tools/cache/thread_safe_store.go 
create mode 100644 vendor/k8s.io/client-go/tools/cache/undelta_store.go create mode 100644 vendor/k8s.io/client-go/tools/clientcmd/api/doc.go create mode 100644 vendor/k8s.io/client-go/tools/clientcmd/api/helpers.go create mode 100644 vendor/k8s.io/client-go/tools/clientcmd/api/latest/latest.go create mode 100644 vendor/k8s.io/client-go/tools/clientcmd/api/register.go create mode 100644 vendor/k8s.io/client-go/tools/clientcmd/api/types.go create mode 100644 vendor/k8s.io/client-go/tools/clientcmd/api/v1/conversion.go create mode 100644 vendor/k8s.io/client-go/tools/clientcmd/api/v1/doc.go create mode 100644 vendor/k8s.io/client-go/tools/clientcmd/api/v1/register.go create mode 100644 vendor/k8s.io/client-go/tools/clientcmd/api/v1/types.go create mode 100644 vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/client-go/tools/clientcmd/api/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/client-go/tools/clientcmd/auth_loaders.go create mode 100644 vendor/k8s.io/client-go/tools/clientcmd/client_config.go create mode 100644 vendor/k8s.io/client-go/tools/clientcmd/config.go create mode 100644 vendor/k8s.io/client-go/tools/clientcmd/doc.go create mode 100644 vendor/k8s.io/client-go/tools/clientcmd/flag.go create mode 100644 vendor/k8s.io/client-go/tools/clientcmd/helpers.go create mode 100644 vendor/k8s.io/client-go/tools/clientcmd/loader.go create mode 100644 vendor/k8s.io/client-go/tools/clientcmd/merged_client_builder.go create mode 100644 vendor/k8s.io/client-go/tools/clientcmd/overrides.go create mode 100644 vendor/k8s.io/client-go/tools/clientcmd/validation.go create mode 100644 vendor/k8s.io/client-go/tools/metrics/OWNERS create mode 100644 vendor/k8s.io/client-go/tools/metrics/metrics.go create mode 100644 vendor/k8s.io/client-go/tools/pager/pager.go create mode 100644 vendor/k8s.io/client-go/tools/reference/ref.go create mode 100644 vendor/k8s.io/client-go/transport/OWNERS create mode 100644 
vendor/k8s.io/client-go/transport/cache.go create mode 100644 vendor/k8s.io/client-go/transport/config.go create mode 100644 vendor/k8s.io/client-go/transport/round_trippers.go create mode 100644 vendor/k8s.io/client-go/transport/token_source.go create mode 100644 vendor/k8s.io/client-go/transport/transport.go create mode 100644 vendor/k8s.io/client-go/util/cert/OWNERS create mode 100644 vendor/k8s.io/client-go/util/cert/cert.go create mode 100644 vendor/k8s.io/client-go/util/cert/csr.go create mode 100644 vendor/k8s.io/client-go/util/cert/io.go create mode 100644 vendor/k8s.io/client-go/util/cert/pem.go create mode 100644 vendor/k8s.io/client-go/util/connrotation/connrotation.go create mode 100644 vendor/k8s.io/client-go/util/flowcontrol/backoff.go create mode 100644 vendor/k8s.io/client-go/util/flowcontrol/throttle.go create mode 100644 vendor/k8s.io/client-go/util/homedir/homedir.go create mode 100644 vendor/k8s.io/client-go/util/jsonpath/doc.go create mode 100644 vendor/k8s.io/client-go/util/jsonpath/jsonpath.go create mode 100644 vendor/k8s.io/client-go/util/jsonpath/node.go create mode 100644 vendor/k8s.io/client-go/util/jsonpath/parser.go create mode 100644 vendor/k8s.io/client-go/util/keyutil/OWNERS create mode 100644 vendor/k8s.io/client-go/util/keyutil/key.go create mode 100644 vendor/k8s.io/client-go/util/retry/OWNERS create mode 100644 vendor/k8s.io/client-go/util/retry/util.go create mode 100644 vendor/k8s.io/klog/.travis.yml create mode 100644 vendor/k8s.io/klog/CONTRIBUTING.md create mode 100644 vendor/k8s.io/klog/LICENSE create mode 100644 vendor/k8s.io/klog/OWNERS create mode 100644 vendor/k8s.io/klog/README.md create mode 100644 vendor/k8s.io/klog/RELEASE.md create mode 100644 vendor/k8s.io/klog/SECURITY_CONTACTS create mode 100644 vendor/k8s.io/klog/code-of-conduct.md create mode 100644 vendor/k8s.io/klog/go.mod create mode 100644 vendor/k8s.io/klog/go.sum create mode 100644 vendor/k8s.io/klog/klog.go create mode 100644 
vendor/k8s.io/klog/klog_file.go create mode 100644 vendor/k8s.io/kube-openapi/LICENSE create mode 100644 vendor/k8s.io/kube-openapi/pkg/util/proto/OWNERS create mode 100644 vendor/k8s.io/kube-openapi/pkg/util/proto/doc.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/util/proto/document.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/util/proto/openapi.go create mode 100644 vendor/k8s.io/utils/LICENSE create mode 100644 vendor/k8s.io/utils/buffer/ring_growing.go create mode 100644 vendor/k8s.io/utils/integer/integer.go create mode 100644 vendor/k8s.io/utils/trace/trace.go create mode 100644 vendor/modules.txt create mode 100644 vendor/sigs.k8s.io/yaml/.gitignore create mode 100644 vendor/sigs.k8s.io/yaml/.travis.yml create mode 100644 vendor/sigs.k8s.io/yaml/CONTRIBUTING.md create mode 100644 vendor/sigs.k8s.io/yaml/LICENSE create mode 100644 vendor/sigs.k8s.io/yaml/OWNERS create mode 100644 vendor/sigs.k8s.io/yaml/README.md create mode 100644 vendor/sigs.k8s.io/yaml/RELEASE.md create mode 100644 vendor/sigs.k8s.io/yaml/SECURITY_CONTACTS create mode 100644 vendor/sigs.k8s.io/yaml/code-of-conduct.md create mode 100644 vendor/sigs.k8s.io/yaml/fields.go create mode 100644 vendor/sigs.k8s.io/yaml/yaml.go create mode 100644 vendor/sigs.k8s.io/yaml/yaml_go110.go diff --git a/go.mod b/go.mod index 92f36b2e6a6..05378cbc9c3 100644 --- a/go.mod +++ b/go.mod @@ -3,53 +3,36 @@ module github.com/coredns/coredns go 1.12 require ( - cloud.google.com/go v0.41.0 // indirect github.com/Azure/azure-sdk-for-go v32.6.0+incompatible github.com/Azure/go-autorest/autorest v0.9.3 github.com/Azure/go-autorest/autorest/azure/auth v0.4.1 - github.com/DataDog/datadog-go v2.2.0+incompatible // indirect - github.com/Shopify/sarama v1.21.0 // indirect - github.com/apache/thrift v0.13.0 // indirect github.com/aws/aws-sdk-go v1.25.48 github.com/caddyserver/caddy v1.0.4 + github.com/cenkalti/backoff 
v2.2.1+incompatible // indirect github.com/coredns/federation v0.0.0-20190818181423-e032b096babe - github.com/coreos/go-systemd v0.0.0-20190212144455-93d5ec2c7f76 // indirect - github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f // indirect github.com/dnstap/golang-dnstap v0.0.0-20170829151710-2cf77a2b5e11 - github.com/evanphx/json-patch v4.1.0+incompatible // indirect github.com/farsightsec/golang-framestream v0.0.0-20181102145529-8a0cb8ba8710 - github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef // indirect github.com/golang/protobuf v1.3.2 - github.com/googleapis/gnostic v0.2.0 // indirect github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 - github.com/imdario/mergo v0.3.7 // indirect github.com/infobloxopen/go-trees v0.0.0-20190313150506-2af4e13f9062 github.com/matttproud/golang_protobuf_extensions v1.0.1 github.com/miekg/dns v1.1.25 - github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492 // indirect + github.com/openshift/coredns-mdns v0.0.0-20200122115902-259b209eea6a github.com/opentracing/opentracing-go v1.1.0 github.com/openzipkin-contrib/zipkin-go-opentracing v0.3.5 - github.com/philhofer/fwd v1.0.0 // indirect github.com/prometheus/client_golang v1.2.1 github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 github.com/prometheus/common v0.7.0 - github.com/spf13/pflag v1.0.3 // indirect - github.com/tinylib/msgp v1.1.0 // indirect - github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 // indirect go.etcd.io/etcd v0.5.0-alpha.5.0.20190917205325-a14579fbfb1a golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 - golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553 // indirect golang.org/x/sys v0.0.0-20191210023423-ac6580df4449 google.golang.org/api v0.14.0 
google.golang.org/grpc v1.25.1 gopkg.in/DataDog/dd-trace-go.v1 v1.19.0 - gopkg.in/inf.v0 v0.9.1 // indirect k8s.io/api v0.0.0-20190620084959-7cf5895f2711 k8s.io/apimachinery v0.0.0-20190612205821-1799e75a0719 k8s.io/client-go v0.0.0-20190620085101-78d2af792bab k8s.io/klog v0.4.0 - k8s.io/kube-openapi v0.0.0-20190306001800-15615b16d372 // indirect - k8s.io/utils v0.0.0-20190529001817-6999998975a7 // indirect ) replace ( diff --git a/go.sum b/go.sum index 5455e1f0660..01c785e7700 100644 --- a/go.sum +++ b/go.sum @@ -75,6 +75,10 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/caddyserver/caddy v1.0.4 h1:wwuGSkUHo6RZ3oMpeTt7J09WBB87X5o+IZN4dKehcQE= github.com/caddyserver/caddy v1.0.4/go.mod h1:uruyfVsyMcDb3IOzSKsi1x0wOjy1my/PxOSTcD+24jM= +github.com/celebdor/zeroconf v0.0.0-20190404095836-c328d57fca11 h1:VthumoLRWujBLOoUGiCRF1Uyks6lZBHWoxItmkSajqA= +github.com/celebdor/zeroconf v0.0.0-20190404095836-c328d57fca11/go.mod h1:u+uV1CvldRx3gLkxSL/4KfFGFRmdPWwFJ7lDwdFkCe4= +github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= +github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -85,6 +89,7 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk github.com/cloudflare/cloudflare-go v0.10.2/go.mod h1:qhVI5MKwBGhdNU89ZRz2plgYutcJ5PCekLxXn56w6SY= github.com/cockroachdb/datadriven 
v0.0.0-20190809214429-80d97fb3cbaa h1:OaNxuTZr7kxeODyLWsRMC+OD03aFUH+mW6r2d+MWa5Y= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/coredns/coredns v1.6.6/go.mod h1:Bdcnka9HmKGYj12ZIDF3lpQSfDHSsMc85Wj9xEyZUts= github.com/coredns/federation v0.0.0-20190818181423-e032b096babe h1:ND08lR/TclI9W4dScCwdRESOacCCdF3FkuB5pBIOv1U= github.com/coredns/federation v0.0.0-20190818181423-e032b096babe/go.mod h1:MoqTEFX8GlnKkyq8eBCF94VzkNAOgjdlCJ+Pz/oCLPk= github.com/coreos/go-semver v0.2.0 h1:3Jm3tLmsgAYcjC+4Up7hJrFBPr+n7rAqYeSw/SZazuY= @@ -304,6 +309,8 @@ github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W github.com/onsi/gomega v0.0.0-20190113212917-5533ce8a0da3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/openshift/coredns-mdns v0.0.0-20200122115902-259b209eea6a h1:twcIn3o9X7RkDiQpGP64zpUHESQUwAfwgQ7Mvr0beM4= +github.com/openshift/coredns-mdns v0.0.0-20200122115902-259b209eea6a/go.mod h1:BLITL6gPQ/ou0UMHoA3YvFdAoh2UsV/YNgA7Sx9DMz4= github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492 h1:lM6RxxfUMrYL/f8bWEUqdXrANWtrL7Nndbm9iFN0DlU= github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU= diff --git a/vendor/cloud.google.com/go/LICENSE b/vendor/cloud.google.com/go/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/cloud.google.com/go/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS 
AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go new file mode 100644 index 00000000000..125b7033c96 --- /dev/null +++ b/vendor/cloud.google.com/go/compute/metadata/metadata.go @@ -0,0 +1,513 @@ +// Copyright 2014 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Package metadata provides access to Google Compute Engine (GCE) +// metadata and API service accounts. +// +// This package is a wrapper around the GCE metadata service, +// as documented at https://developers.google.com/compute/docs/metadata. +package metadata // import "cloud.google.com/go/compute/metadata" + +import ( + "context" + "encoding/json" + "fmt" + "io/ioutil" + "net" + "net/http" + "net/url" + "os" + "runtime" + "strings" + "sync" + "time" +) + +const ( + // metadataIP is the documented metadata server IP address. + metadataIP = "169.254.169.254" + + // metadataHostEnv is the environment variable specifying the + // GCE metadata hostname. If empty, the default value of + // metadataIP ("169.254.169.254") is used instead. + // This is variable name is not defined by any spec, as far as + // I know; it was made up for the Go package. + metadataHostEnv = "GCE_METADATA_HOST" + + userAgent = "gcloud-golang/0.1" +) + +type cachedValue struct { + k string + trim bool + mu sync.Mutex + v string +} + +var ( + projID = &cachedValue{k: "project/project-id", trim: true} + projNum = &cachedValue{k: "project/numeric-project-id", trim: true} + instID = &cachedValue{k: "instance/id", trim: true} +) + +var ( + defaultClient = &Client{hc: &http.Client{ + Transport: &http.Transport{ + Dial: (&net.Dialer{ + Timeout: 2 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + ResponseHeaderTimeout: 2 * time.Second, + }, + }} + subscribeClient = &Client{hc: &http.Client{ + Transport: &http.Transport{ + Dial: (&net.Dialer{ + Timeout: 2 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + }, + }} +) + +// NotDefinedError is returned when requested metadata is not defined. +// +// The underlying string is the suffix after "/computeMetadata/v1/". +// +// This error is not returned if the value is defined to be the empty +// string. 
+type NotDefinedError string + +func (suffix NotDefinedError) Error() string { + return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix)) +} + +func (c *cachedValue) get(cl *Client) (v string, err error) { + defer c.mu.Unlock() + c.mu.Lock() + if c.v != "" { + return c.v, nil + } + if c.trim { + v, err = cl.getTrimmed(c.k) + } else { + v, err = cl.Get(c.k) + } + if err == nil { + c.v = v + } + return +} + +var ( + onGCEOnce sync.Once + onGCE bool +) + +// OnGCE reports whether this process is running on Google Compute Engine. +func OnGCE() bool { + onGCEOnce.Do(initOnGCE) + return onGCE +} + +func initOnGCE() { + onGCE = testOnGCE() +} + +func testOnGCE() bool { + // The user explicitly said they're on GCE, so trust them. + if os.Getenv(metadataHostEnv) != "" { + return true + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + resc := make(chan bool, 2) + + // Try two strategies in parallel. + // See https://github.com/googleapis/google-cloud-go/issues/194 + go func() { + req, _ := http.NewRequest("GET", "http://"+metadataIP, nil) + req.Header.Set("User-Agent", userAgent) + res, err := defaultClient.hc.Do(req.WithContext(ctx)) + if err != nil { + resc <- false + return + } + defer res.Body.Close() + resc <- res.Header.Get("Metadata-Flavor") == "Google" + }() + + go func() { + addrs, err := net.LookupHost("metadata.google.internal") + if err != nil || len(addrs) == 0 { + resc <- false + return + } + resc <- strsContains(addrs, metadataIP) + }() + + tryHarder := systemInfoSuggestsGCE() + if tryHarder { + res := <-resc + if res { + // The first strategy succeeded, so let's use it. + return true + } + // Wait for either the DNS or metadata server probe to + // contradict the other one and say we are running on + // GCE. Give it a lot of time to do so, since the system + // info already suggests we're running on a GCE BIOS. 
+ timer := time.NewTimer(5 * time.Second) + defer timer.Stop() + select { + case res = <-resc: + return res + case <-timer.C: + // Too slow. Who knows what this system is. + return false + } + } + + // There's no hint from the system info that we're running on + // GCE, so use the first probe's result as truth, whether it's + // true or false. The goal here is to optimize for speed for + // users who are NOT running on GCE. We can't assume that + // either a DNS lookup or an HTTP request to a blackholed IP + // address is fast. Worst case this should return when the + // metaClient's Transport.ResponseHeaderTimeout or + // Transport.Dial.Timeout fires (in two seconds). + return <-resc +} + +// systemInfoSuggestsGCE reports whether the local system (without +// doing network requests) suggests that we're running on GCE. If this +// returns true, testOnGCE tries a bit harder to reach its metadata +// server. +func systemInfoSuggestsGCE() bool { + if runtime.GOOS != "linux" { + // We don't have any non-Linux clues available, at least yet. + return false + } + slurp, _ := ioutil.ReadFile("/sys/class/dmi/id/product_name") + name := strings.TrimSpace(string(slurp)) + return name == "Google" || name == "Google Compute Engine" +} + +// Subscribe calls Client.Subscribe on a client designed for subscribing (one with no +// ResponseHeaderTimeout). +func Subscribe(suffix string, fn func(v string, ok bool) error) error { + return subscribeClient.Subscribe(suffix, fn) +} + +// Get calls Client.Get on the default client. +func Get(suffix string) (string, error) { return defaultClient.Get(suffix) } + +// ProjectID returns the current instance's project ID string. +func ProjectID() (string, error) { return defaultClient.ProjectID() } + +// NumericProjectID returns the current instance's numeric project ID. +func NumericProjectID() (string, error) { return defaultClient.NumericProjectID() } + +// InternalIP returns the instance's primary internal IP address. 
+func InternalIP() (string, error) { return defaultClient.InternalIP() } + +// ExternalIP returns the instance's primary external (public) IP address. +func ExternalIP() (string, error) { return defaultClient.ExternalIP() } + +// Hostname returns the instance's hostname. This will be of the form +// ".c..internal". +func Hostname() (string, error) { return defaultClient.Hostname() } + +// InstanceTags returns the list of user-defined instance tags, +// assigned when initially creating a GCE instance. +func InstanceTags() ([]string, error) { return defaultClient.InstanceTags() } + +// InstanceID returns the current VM's numeric instance ID. +func InstanceID() (string, error) { return defaultClient.InstanceID() } + +// InstanceName returns the current VM's instance ID string. +func InstanceName() (string, error) { return defaultClient.InstanceName() } + +// Zone returns the current VM's zone, such as "us-central1-b". +func Zone() (string, error) { return defaultClient.Zone() } + +// InstanceAttributes calls Client.InstanceAttributes on the default client. +func InstanceAttributes() ([]string, error) { return defaultClient.InstanceAttributes() } + +// ProjectAttributes calls Client.ProjectAttributes on the default client. +func ProjectAttributes() ([]string, error) { return defaultClient.ProjectAttributes() } + +// InstanceAttributeValue calls Client.InstanceAttributeValue on the default client. +func InstanceAttributeValue(attr string) (string, error) { + return defaultClient.InstanceAttributeValue(attr) +} + +// ProjectAttributeValue calls Client.ProjectAttributeValue on the default client. +func ProjectAttributeValue(attr string) (string, error) { + return defaultClient.ProjectAttributeValue(attr) +} + +// Scopes calls Client.Scopes on the default client. 
+func Scopes(serviceAccount string) ([]string, error) { return defaultClient.Scopes(serviceAccount) } + +func strsContains(ss []string, s string) bool { + for _, v := range ss { + if v == s { + return true + } + } + return false +} + +// A Client provides metadata. +type Client struct { + hc *http.Client +} + +// NewClient returns a Client that can be used to fetch metadata. All HTTP requests +// will use the given http.Client instead of the default client. +func NewClient(c *http.Client) *Client { + return &Client{hc: c} +} + +// getETag returns a value from the metadata service as well as the associated ETag. +// This func is otherwise equivalent to Get. +func (c *Client) getETag(suffix string) (value, etag string, err error) { + // Using a fixed IP makes it very difficult to spoof the metadata service in + // a container, which is an important use-case for local testing of cloud + // deployments. To enable spoofing of the metadata service, the environment + // variable GCE_METADATA_HOST is first inspected to decide where metadata + // requests shall go. + host := os.Getenv(metadataHostEnv) + if host == "" { + // Using 169.254.169.254 instead of "metadata" here because Go + // binaries built with the "netgo" tag and without cgo won't + // know the search suffix for "metadata" is + // ".google.internal", and this IP address is documented as + // being stable anyway. 
+ host = metadataIP + } + u := "http://" + host + "/computeMetadata/v1/" + suffix + req, _ := http.NewRequest("GET", u, nil) + req.Header.Set("Metadata-Flavor", "Google") + req.Header.Set("User-Agent", userAgent) + res, err := c.hc.Do(req) + if err != nil { + return "", "", err + } + defer res.Body.Close() + if res.StatusCode == http.StatusNotFound { + return "", "", NotDefinedError(suffix) + } + all, err := ioutil.ReadAll(res.Body) + if err != nil { + return "", "", err + } + if res.StatusCode != 200 { + return "", "", &Error{Code: res.StatusCode, Message: string(all)} + } + return string(all), res.Header.Get("Etag"), nil +} + +// Get returns a value from the metadata service. +// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". +// +// If the GCE_METADATA_HOST environment variable is not defined, a default of +// 169.254.169.254 will be used instead. +// +// If the requested metadata is not defined, the returned error will +// be of type NotDefinedError. +func (c *Client) Get(suffix string) (string, error) { + val, _, err := c.getETag(suffix) + return val, err +} + +func (c *Client) getTrimmed(suffix string) (s string, err error) { + s, err = c.Get(suffix) + s = strings.TrimSpace(s) + return +} + +func (c *Client) lines(suffix string) ([]string, error) { + j, err := c.Get(suffix) + if err != nil { + return nil, err + } + s := strings.Split(strings.TrimSpace(j), "\n") + for i := range s { + s[i] = strings.TrimSpace(s[i]) + } + return s, nil +} + +// ProjectID returns the current instance's project ID string. +func (c *Client) ProjectID() (string, error) { return projID.get(c) } + +// NumericProjectID returns the current instance's numeric project ID. +func (c *Client) NumericProjectID() (string, error) { return projNum.get(c) } + +// InstanceID returns the current VM's numeric instance ID. +func (c *Client) InstanceID() (string, error) { return instID.get(c) } + +// InternalIP returns the instance's primary internal IP address. 
+func (c *Client) InternalIP() (string, error) { + return c.getTrimmed("instance/network-interfaces/0/ip") +} + +// ExternalIP returns the instance's primary external (public) IP address. +func (c *Client) ExternalIP() (string, error) { + return c.getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip") +} + +// Hostname returns the instance's hostname. This will be of the form +// ".c..internal". +func (c *Client) Hostname() (string, error) { + return c.getTrimmed("instance/hostname") +} + +// InstanceTags returns the list of user-defined instance tags, +// assigned when initially creating a GCE instance. +func (c *Client) InstanceTags() ([]string, error) { + var s []string + j, err := c.Get("instance/tags") + if err != nil { + return nil, err + } + if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil { + return nil, err + } + return s, nil +} + +// InstanceName returns the current VM's instance ID string. +func (c *Client) InstanceName() (string, error) { + host, err := c.Hostname() + if err != nil { + return "", err + } + return strings.Split(host, ".")[0], nil +} + +// Zone returns the current VM's zone, such as "us-central1-b". +func (c *Client) Zone() (string, error) { + zone, err := c.getTrimmed("instance/zone") + // zone is of the form "projects//zones/". + if err != nil { + return "", err + } + return zone[strings.LastIndex(zone, "/")+1:], nil +} + +// InstanceAttributes returns the list of user-defined attributes, +// assigned when initially creating a GCE VM instance. The value of an +// attribute can be obtained with InstanceAttributeValue. +func (c *Client) InstanceAttributes() ([]string, error) { return c.lines("instance/attributes/") } + +// ProjectAttributes returns the list of user-defined attributes +// applying to the project as a whole, not just this VM. The value of +// an attribute can be obtained with ProjectAttributeValue. 
+func (c *Client) ProjectAttributes() ([]string, error) { return c.lines("project/attributes/") } + +// InstanceAttributeValue returns the value of the provided VM +// instance attribute. +// +// If the requested attribute is not defined, the returned error will +// be of type NotDefinedError. +// +// InstanceAttributeValue may return ("", nil) if the attribute was +// defined to be the empty string. +func (c *Client) InstanceAttributeValue(attr string) (string, error) { + return c.Get("instance/attributes/" + attr) +} + +// ProjectAttributeValue returns the value of the provided +// project attribute. +// +// If the requested attribute is not defined, the returned error will +// be of type NotDefinedError. +// +// ProjectAttributeValue may return ("", nil) if the attribute was +// defined to be the empty string. +func (c *Client) ProjectAttributeValue(attr string) (string, error) { + return c.Get("project/attributes/" + attr) +} + +// Scopes returns the service account scopes for the given account. +// The account may be empty or the string "default" to use the instance's +// main account. +func (c *Client) Scopes(serviceAccount string) ([]string, error) { + if serviceAccount == "" { + serviceAccount = "default" + } + return c.lines("instance/service-accounts/" + serviceAccount + "/scopes") +} + +// Subscribe subscribes to a value from the metadata service. +// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". +// The suffix may contain query parameters. +// +// Subscribe calls fn with the latest metadata value indicated by the provided +// suffix. If the metadata value is deleted, fn is called with the empty string +// and ok false. Subscribe blocks until fn returns a non-nil error or the value +// is deleted. Subscribe returns the error value returned from the last call to +// fn, which may be nil when ok == false. 
+func (c *Client) Subscribe(suffix string, fn func(v string, ok bool) error) error { + const failedSubscribeSleep = time.Second * 5 + + // First check to see if the metadata value exists at all. + val, lastETag, err := c.getETag(suffix) + if err != nil { + return err + } + + if err := fn(val, true); err != nil { + return err + } + + ok := true + if strings.ContainsRune(suffix, '?') { + suffix += "&wait_for_change=true&last_etag=" + } else { + suffix += "?wait_for_change=true&last_etag=" + } + for { + val, etag, err := c.getETag(suffix + url.QueryEscape(lastETag)) + if err != nil { + if _, deleted := err.(NotDefinedError); !deleted { + time.Sleep(failedSubscribeSleep) + continue // Retry on other errors. + } + ok = false + } + lastETag = etag + + if err := fn(val, ok); err != nil || !ok { + return err + } + } +} + +// Error contains an error response from the server. +type Error struct { + // Code is the HTTP response status code. + Code int + // Message is the server response message. + Message string +} + +func (e *Error) Error() string { + return fmt.Sprintf("compute: Received %d `%s`", e.Code, e.Message) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/LICENSE b/vendor/github.com/Azure/azure-sdk-for-go/LICENSE new file mode 100644 index 00000000000..af39a91e703 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/NOTICE b/vendor/github.com/Azure/azure-sdk-for-go/NOTICE new file mode 100644 index 00000000000..2d1d72608c2 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/NOTICE @@ -0,0 +1,5 @@ +Microsoft Azure-SDK-for-Go +Copyright 2014-2017 Microsoft + +This product includes software developed at +the Microsoft Corporation (https://www.microsoft.com). 
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/profiles/latest/dns/mgmt/dns/models.go b/vendor/github.com/Azure/azure-sdk-for-go/profiles/latest/dns/mgmt/dns/models.go new file mode 100644 index 00000000000..bb47c7dee04 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/profiles/latest/dns/mgmt/dns/models.go @@ -0,0 +1,138 @@ +// +build go1.9 + +// Copyright 2019 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This code was auto-generated by: +// github.com/Azure/azure-sdk-for-go/tools/profileBuilder + +package dns + +import ( + "context" + + original "github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns" +) + +const ( + DefaultBaseURI = original.DefaultBaseURI +) + +type RecordType = original.RecordType + +const ( + A RecordType = original.A + AAAA RecordType = original.AAAA + CAA RecordType = original.CAA + CNAME RecordType = original.CNAME + MX RecordType = original.MX + NS RecordType = original.NS + PTR RecordType = original.PTR + SOA RecordType = original.SOA + SRV RecordType = original.SRV + TXT RecordType = original.TXT +) + +type ZoneType = original.ZoneType + +const ( + Private ZoneType = original.Private + Public ZoneType = original.Public +) + +type ARecord = original.ARecord +type AaaaRecord = original.AaaaRecord +type BaseClient = original.BaseClient +type CaaRecord = original.CaaRecord +type CloudError = original.CloudError +type CloudErrorBody = 
original.CloudErrorBody +type CnameRecord = original.CnameRecord +type MxRecord = original.MxRecord +type NsRecord = original.NsRecord +type PtrRecord = original.PtrRecord +type RecordSet = original.RecordSet +type RecordSetListResult = original.RecordSetListResult +type RecordSetListResultIterator = original.RecordSetListResultIterator +type RecordSetListResultPage = original.RecordSetListResultPage +type RecordSetProperties = original.RecordSetProperties +type RecordSetUpdateParameters = original.RecordSetUpdateParameters +type RecordSetsClient = original.RecordSetsClient +type Resource = original.Resource +type ResourceReference = original.ResourceReference +type ResourceReferenceClient = original.ResourceReferenceClient +type ResourceReferenceRequest = original.ResourceReferenceRequest +type ResourceReferenceRequestProperties = original.ResourceReferenceRequestProperties +type ResourceReferenceResult = original.ResourceReferenceResult +type ResourceReferenceResultProperties = original.ResourceReferenceResultProperties +type SoaRecord = original.SoaRecord +type SrvRecord = original.SrvRecord +type SubResource = original.SubResource +type TxtRecord = original.TxtRecord +type Zone = original.Zone +type ZoneListResult = original.ZoneListResult +type ZoneListResultIterator = original.ZoneListResultIterator +type ZoneListResultPage = original.ZoneListResultPage +type ZoneProperties = original.ZoneProperties +type ZoneUpdate = original.ZoneUpdate +type ZonesClient = original.ZonesClient +type ZonesDeleteFuture = original.ZonesDeleteFuture + +func New(subscriptionID string) BaseClient { + return original.New(subscriptionID) +} +func NewRecordSetListResultIterator(page RecordSetListResultPage) RecordSetListResultIterator { + return original.NewRecordSetListResultIterator(page) +} +func NewRecordSetListResultPage(getNextPage func(context.Context, RecordSetListResult) (RecordSetListResult, error)) RecordSetListResultPage { + return 
original.NewRecordSetListResultPage(getNextPage) +} +func NewRecordSetsClient(subscriptionID string) RecordSetsClient { + return original.NewRecordSetsClient(subscriptionID) +} +func NewRecordSetsClientWithBaseURI(baseURI string, subscriptionID string) RecordSetsClient { + return original.NewRecordSetsClientWithBaseURI(baseURI, subscriptionID) +} +func NewResourceReferenceClient(subscriptionID string) ResourceReferenceClient { + return original.NewResourceReferenceClient(subscriptionID) +} +func NewResourceReferenceClientWithBaseURI(baseURI string, subscriptionID string) ResourceReferenceClient { + return original.NewResourceReferenceClientWithBaseURI(baseURI, subscriptionID) +} +func NewWithBaseURI(baseURI string, subscriptionID string) BaseClient { + return original.NewWithBaseURI(baseURI, subscriptionID) +} +func NewZoneListResultIterator(page ZoneListResultPage) ZoneListResultIterator { + return original.NewZoneListResultIterator(page) +} +func NewZoneListResultPage(getNextPage func(context.Context, ZoneListResult) (ZoneListResult, error)) ZoneListResultPage { + return original.NewZoneListResultPage(getNextPage) +} +func NewZonesClient(subscriptionID string) ZonesClient { + return original.NewZonesClient(subscriptionID) +} +func NewZonesClientWithBaseURI(baseURI string, subscriptionID string) ZonesClient { + return original.NewZonesClientWithBaseURI(baseURI, subscriptionID) +} +func PossibleRecordTypeValues() []RecordType { + return original.PossibleRecordTypeValues() +} +func PossibleZoneTypeValues() []ZoneType { + return original.PossibleZoneTypeValues() +} +func UserAgent() string { + return original.UserAgent() + " profiles/latest" +} +func Version() string { + return original.Version() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns/client.go b/vendor/github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns/client.go new file mode 100644 index 00000000000..cf17ca134ab --- /dev/null +++ 
b/vendor/github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns/client.go @@ -0,0 +1,51 @@ +// Package dns implements the Azure ARM Dns service API version 2018-05-01. +// +// The DNS Management Client. +package dns + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" +) + +const ( + // DefaultBaseURI is the default URI used for the service Dns + DefaultBaseURI = "https://management.azure.com" +) + +// BaseClient is the base client for Dns. +type BaseClient struct { + autorest.Client + BaseURI string + SubscriptionID string +} + +// New creates an instance of the BaseClient client. +func New(subscriptionID string) BaseClient { + return NewWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewWithBaseURI creates an instance of the BaseClient client. 
+func NewWithBaseURI(baseURI string, subscriptionID string) BaseClient { + return BaseClient{ + Client: autorest.NewClientWithUserAgent(UserAgent()), + BaseURI: baseURI, + SubscriptionID: subscriptionID, + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns/models.go b/vendor/github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns/models.go new file mode 100644 index 00000000000..b9b4c627c0a --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns/models.go @@ -0,0 +1,943 @@ +package dns + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "encoding/json" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/to" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// The package's fully qualified name. +const fqdn = "github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns" + +// RecordType enumerates the values for record type. +type RecordType string + +const ( + // A ... + A RecordType = "A" + // AAAA ... + AAAA RecordType = "AAAA" + // CAA ... 
+ CAA RecordType = "CAA" + // CNAME ... + CNAME RecordType = "CNAME" + // MX ... + MX RecordType = "MX" + // NS ... + NS RecordType = "NS" + // PTR ... + PTR RecordType = "PTR" + // SOA ... + SOA RecordType = "SOA" + // SRV ... + SRV RecordType = "SRV" + // TXT ... + TXT RecordType = "TXT" +) + +// PossibleRecordTypeValues returns an array of possible values for the RecordType const type. +func PossibleRecordTypeValues() []RecordType { + return []RecordType{A, AAAA, CAA, CNAME, MX, NS, PTR, SOA, SRV, TXT} +} + +// ZoneType enumerates the values for zone type. +type ZoneType string + +const ( + // Private ... + Private ZoneType = "Private" + // Public ... + Public ZoneType = "Public" +) + +// PossibleZoneTypeValues returns an array of possible values for the ZoneType const type. +func PossibleZoneTypeValues() []ZoneType { + return []ZoneType{Private, Public} +} + +// AaaaRecord an AAAA record. +type AaaaRecord struct { + // Ipv6Address - The IPv6 address of this AAAA record. + Ipv6Address *string `json:"ipv6Address,omitempty"` +} + +// ARecord an A record. +type ARecord struct { + // Ipv4Address - The IPv4 address of this A record. + Ipv4Address *string `json:"ipv4Address,omitempty"` +} + +// CaaRecord a CAA record. +type CaaRecord struct { + // Flags - The flags for this CAA record as an integer between 0 and 255. + Flags *int32 `json:"flags,omitempty"` + // Tag - The tag for this CAA record. + Tag *string `json:"tag,omitempty"` + // Value - The value for this CAA record. 
+ Value *string `json:"value,omitempty"` +} + +// CloudError an error message +type CloudError struct { + // Error - The error message body + Error *CloudErrorBody `json:"error,omitempty"` +} + +// CloudErrorBody the body of an error message +type CloudErrorBody struct { + // Code - The error code + Code *string `json:"code,omitempty"` + // Message - A description of what caused the error + Message *string `json:"message,omitempty"` + // Target - The target resource of the error message + Target *string `json:"target,omitempty"` + // Details - Extra error information + Details *[]CloudErrorBody `json:"details,omitempty"` +} + +// CnameRecord a CNAME record. +type CnameRecord struct { + // Cname - The canonical name for this CNAME record. + Cname *string `json:"cname,omitempty"` +} + +// MxRecord an MX record. +type MxRecord struct { + // Preference - The preference value for this MX record. + Preference *int32 `json:"preference,omitempty"` + // Exchange - The domain name of the mail host for this MX record. + Exchange *string `json:"exchange,omitempty"` +} + +// NsRecord an NS record. +type NsRecord struct { + // Nsdname - The name server name for this NS record. + Nsdname *string `json:"nsdname,omitempty"` +} + +// PtrRecord a PTR record. +type PtrRecord struct { + // Ptrdname - The PTR target domain name for this PTR record. + Ptrdname *string `json:"ptrdname,omitempty"` +} + +// RecordSet describes a DNS record set (a collection of DNS records with the same name and type). +type RecordSet struct { + autorest.Response `json:"-"` + // ID - READ-ONLY; The ID of the record set. + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; The name of the record set. + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; The type of the record set. + Type *string `json:"type,omitempty"` + // Etag - The etag of the record set. + Etag *string `json:"etag,omitempty"` + // RecordSetProperties - The properties of the record set. 
+ *RecordSetProperties `json:"properties,omitempty"` +} + +// MarshalJSON is the custom marshaler for RecordSet. +func (rs RecordSet) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if rs.Etag != nil { + objectMap["etag"] = rs.Etag + } + if rs.RecordSetProperties != nil { + objectMap["properties"] = rs.RecordSetProperties + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for RecordSet struct. +func (rs *RecordSet) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + rs.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + rs.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + rs.Type = &typeVar + } + case "etag": + if v != nil { + var etag string + err = json.Unmarshal(*v, &etag) + if err != nil { + return err + } + rs.Etag = &etag + } + case "properties": + if v != nil { + var recordSetProperties RecordSetProperties + err = json.Unmarshal(*v, &recordSetProperties) + if err != nil { + return err + } + rs.RecordSetProperties = &recordSetProperties + } + } + } + + return nil +} + +// RecordSetListResult the response to a record set List operation. +type RecordSetListResult struct { + autorest.Response `json:"-"` + // Value - Information about the record sets in the response. + Value *[]RecordSet `json:"value,omitempty"` + // NextLink - READ-ONLY; The continuation token for the next page of results. + NextLink *string `json:"nextLink,omitempty"` +} + +// RecordSetListResultIterator provides access to a complete listing of RecordSet values. 
+type RecordSetListResultIterator struct { + i int + page RecordSetListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *RecordSetListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/RecordSetListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *RecordSetListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter RecordSetListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter RecordSetListResultIterator) Response() RecordSetListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter RecordSetListResultIterator) Value() RecordSet { + if !iter.page.NotDone() { + return RecordSet{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the RecordSetListResultIterator type. 
+func NewRecordSetListResultIterator(page RecordSetListResultPage) RecordSetListResultIterator { + return RecordSetListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (rslr RecordSetListResult) IsEmpty() bool { + return rslr.Value == nil || len(*rslr.Value) == 0 +} + +// recordSetListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (rslr RecordSetListResult) recordSetListResultPreparer(ctx context.Context) (*http.Request, error) { + if rslr.NextLink == nil || len(to.String(rslr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(rslr.NextLink))) +} + +// RecordSetListResultPage contains a page of RecordSet values. +type RecordSetListResultPage struct { + fn func(context.Context, RecordSetListResult) (RecordSetListResult, error) + rslr RecordSetListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *RecordSetListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/RecordSetListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.rslr) + if err != nil { + return err + } + page.rslr = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. 
+func (page *RecordSetListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page RecordSetListResultPage) NotDone() bool { + return !page.rslr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page RecordSetListResultPage) Response() RecordSetListResult { + return page.rslr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page RecordSetListResultPage) Values() []RecordSet { + if page.rslr.IsEmpty() { + return nil + } + return *page.rslr.Value +} + +// Creates a new instance of the RecordSetListResultPage type. +func NewRecordSetListResultPage(getNextPage func(context.Context, RecordSetListResult) (RecordSetListResult, error)) RecordSetListResultPage { + return RecordSetListResultPage{fn: getNextPage} +} + +// RecordSetProperties represents the properties of the records in the record set. +type RecordSetProperties struct { + // Metadata - The metadata attached to the record set. + Metadata map[string]*string `json:"metadata"` + // TTL - The TTL (time-to-live) of the records in the record set. + TTL *int64 `json:"TTL,omitempty"` + // Fqdn - READ-ONLY; Fully qualified domain name of the record set. + Fqdn *string `json:"fqdn,omitempty"` + // ProvisioningState - READ-ONLY; provisioning State of the record set. + ProvisioningState *string `json:"provisioningState,omitempty"` + // TargetResource - A reference to an azure resource from where the dns resource value is taken. + TargetResource *SubResource `json:"targetResource,omitempty"` + // ARecords - The list of A records in the record set. + ARecords *[]ARecord `json:"ARecords,omitempty"` + // AaaaRecords - The list of AAAA records in the record set. + AaaaRecords *[]AaaaRecord `json:"AAAARecords,omitempty"` + // MxRecords - The list of MX records in the record set. 
+ MxRecords *[]MxRecord `json:"MXRecords,omitempty"` + // NsRecords - The list of NS records in the record set. + NsRecords *[]NsRecord `json:"NSRecords,omitempty"` + // PtrRecords - The list of PTR records in the record set. + PtrRecords *[]PtrRecord `json:"PTRRecords,omitempty"` + // SrvRecords - The list of SRV records in the record set. + SrvRecords *[]SrvRecord `json:"SRVRecords,omitempty"` + // TxtRecords - The list of TXT records in the record set. + TxtRecords *[]TxtRecord `json:"TXTRecords,omitempty"` + // CnameRecord - The CNAME record in the record set. + CnameRecord *CnameRecord `json:"CNAMERecord,omitempty"` + // SoaRecord - The SOA record in the record set. + SoaRecord *SoaRecord `json:"SOARecord,omitempty"` + // CaaRecords - The list of CAA records in the record set. + CaaRecords *[]CaaRecord `json:"caaRecords,omitempty"` +} + +// MarshalJSON is the custom marshaler for RecordSetProperties. +func (rsp RecordSetProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if rsp.Metadata != nil { + objectMap["metadata"] = rsp.Metadata + } + if rsp.TTL != nil { + objectMap["TTL"] = rsp.TTL + } + if rsp.TargetResource != nil { + objectMap["targetResource"] = rsp.TargetResource + } + if rsp.ARecords != nil { + objectMap["ARecords"] = rsp.ARecords + } + if rsp.AaaaRecords != nil { + objectMap["AAAARecords"] = rsp.AaaaRecords + } + if rsp.MxRecords != nil { + objectMap["MXRecords"] = rsp.MxRecords + } + if rsp.NsRecords != nil { + objectMap["NSRecords"] = rsp.NsRecords + } + if rsp.PtrRecords != nil { + objectMap["PTRRecords"] = rsp.PtrRecords + } + if rsp.SrvRecords != nil { + objectMap["SRVRecords"] = rsp.SrvRecords + } + if rsp.TxtRecords != nil { + objectMap["TXTRecords"] = rsp.TxtRecords + } + if rsp.CnameRecord != nil { + objectMap["CNAMERecord"] = rsp.CnameRecord + } + if rsp.SoaRecord != nil { + objectMap["SOARecord"] = rsp.SoaRecord + } + if rsp.CaaRecords != nil { + objectMap["caaRecords"] = rsp.CaaRecords + } + return 
json.Marshal(objectMap) +} + +// RecordSetUpdateParameters parameters supplied to update a record set. +type RecordSetUpdateParameters struct { + // RecordSet - Specifies information about the record set being updated. + RecordSet *RecordSet `json:"RecordSet,omitempty"` +} + +// Resource common properties of an Azure Resource Manager resource +type Resource struct { + // ID - READ-ONLY; Resource ID. + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; Resource name. + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; Resource type. + Type *string `json:"type,omitempty"` + // Location - Resource location. + Location *string `json:"location,omitempty"` + // Tags - Resource tags. + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for Resource. +func (r Resource) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if r.Location != nil { + objectMap["location"] = r.Location + } + if r.Tags != nil { + objectMap["tags"] = r.Tags + } + return json.Marshal(objectMap) +} + +// ResourceReference represents a single Azure resource and its referencing DNS records. +type ResourceReference struct { + // DNSResources - A list of dns Records + DNSResources *[]SubResource `json:"dnsResources,omitempty"` + // TargetResource - A reference to an azure resource from where the dns resource value is taken. + TargetResource *SubResource `json:"targetResource,omitempty"` +} + +// ResourceReferenceRequest represents the properties of the Dns Resource Reference Request. +type ResourceReferenceRequest struct { + // ResourceReferenceRequestProperties - The properties of the Resource Reference Request. + *ResourceReferenceRequestProperties `json:"properties,omitempty"` +} + +// MarshalJSON is the custom marshaler for ResourceReferenceRequest. 
+func (rrr ResourceReferenceRequest) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if rrr.ResourceReferenceRequestProperties != nil { + objectMap["properties"] = rrr.ResourceReferenceRequestProperties + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for ResourceReferenceRequest struct. +func (rrr *ResourceReferenceRequest) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var resourceReferenceRequestProperties ResourceReferenceRequestProperties + err = json.Unmarshal(*v, &resourceReferenceRequestProperties) + if err != nil { + return err + } + rrr.ResourceReferenceRequestProperties = &resourceReferenceRequestProperties + } + } + } + + return nil +} + +// ResourceReferenceRequestProperties represents the properties of the Dns Resource Reference Request. +type ResourceReferenceRequestProperties struct { + // TargetResources - A list of references to azure resources for which referencing dns records need to be queried. + TargetResources *[]SubResource `json:"targetResources,omitempty"` +} + +// ResourceReferenceResult represents the properties of the Dns Resource Reference Result. +type ResourceReferenceResult struct { + autorest.Response `json:"-"` + // ResourceReferenceResultProperties - The result of dns resource reference request. Returns a list of dns resource references for each of the azure resource in the request. + *ResourceReferenceResultProperties `json:"properties,omitempty"` +} + +// MarshalJSON is the custom marshaler for ResourceReferenceResult. 
+func (rrr ResourceReferenceResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if rrr.ResourceReferenceResultProperties != nil { + objectMap["properties"] = rrr.ResourceReferenceResultProperties + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for ResourceReferenceResult struct. +func (rrr *ResourceReferenceResult) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var resourceReferenceResultProperties ResourceReferenceResultProperties + err = json.Unmarshal(*v, &resourceReferenceResultProperties) + if err != nil { + return err + } + rrr.ResourceReferenceResultProperties = &resourceReferenceResultProperties + } + } + } + + return nil +} + +// ResourceReferenceResultProperties the result of dns resource reference request. Returns a list of dns +// resource references for each of the azure resource in the request. +type ResourceReferenceResultProperties struct { + // DNSResourceReferences - The result of dns resource reference request. A list of dns resource references for each of the azure resource in the request + DNSResourceReferences *[]ResourceReference `json:"dnsResourceReferences,omitempty"` +} + +// SoaRecord an SOA record. +type SoaRecord struct { + // Host - The domain name of the authoritative name server for this SOA record. + Host *string `json:"host,omitempty"` + // Email - The email contact for this SOA record. + Email *string `json:"email,omitempty"` + // SerialNumber - The serial number for this SOA record. + SerialNumber *int64 `json:"serialNumber,omitempty"` + // RefreshTime - The refresh value for this SOA record. + RefreshTime *int64 `json:"refreshTime,omitempty"` + // RetryTime - The retry time for this SOA record. 
+ RetryTime *int64 `json:"retryTime,omitempty"` + // ExpireTime - The expire time for this SOA record. + ExpireTime *int64 `json:"expireTime,omitempty"` + // MinimumTTL - The minimum value for this SOA record. By convention this is used to determine the negative caching duration. + MinimumTTL *int64 `json:"minimumTTL,omitempty"` +} + +// SrvRecord an SRV record. +type SrvRecord struct { + // Priority - The priority value for this SRV record. + Priority *int32 `json:"priority,omitempty"` + // Weight - The weight value for this SRV record. + Weight *int32 `json:"weight,omitempty"` + // Port - The port value for this SRV record. + Port *int32 `json:"port,omitempty"` + // Target - The target domain name for this SRV record. + Target *string `json:"target,omitempty"` +} + +// SubResource a reference to a another resource +type SubResource struct { + // ID - Resource Id. + ID *string `json:"id,omitempty"` +} + +// TxtRecord a TXT record. +type TxtRecord struct { + // Value - The text value of this TXT record. + Value *[]string `json:"value,omitempty"` +} + +// Zone describes a DNS zone. +type Zone struct { + autorest.Response `json:"-"` + // Etag - The etag of the zone. + Etag *string `json:"etag,omitempty"` + // ZoneProperties - The properties of the zone. + *ZoneProperties `json:"properties,omitempty"` + // ID - READ-ONLY; Resource ID. + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; Resource name. + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; Resource type. + Type *string `json:"type,omitempty"` + // Location - Resource location. + Location *string `json:"location,omitempty"` + // Tags - Resource tags. + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for Zone. 
+func (z Zone) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if z.Etag != nil { + objectMap["etag"] = z.Etag + } + if z.ZoneProperties != nil { + objectMap["properties"] = z.ZoneProperties + } + if z.Location != nil { + objectMap["location"] = z.Location + } + if z.Tags != nil { + objectMap["tags"] = z.Tags + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for Zone struct. +func (z *Zone) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "etag": + if v != nil { + var etag string + err = json.Unmarshal(*v, &etag) + if err != nil { + return err + } + z.Etag = &etag + } + case "properties": + if v != nil { + var zoneProperties ZoneProperties + err = json.Unmarshal(*v, &zoneProperties) + if err != nil { + return err + } + z.ZoneProperties = &zoneProperties + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + z.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + z.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + z.Type = &typeVar + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + z.Location = &location + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + z.Tags = tags + } + } + } + + return nil +} + +// ZoneListResult the response to a Zone List or ListAll operation. +type ZoneListResult struct { + autorest.Response `json:"-"` + // Value - Information about the DNS zones. 
+ Value *[]Zone `json:"value,omitempty"` + // NextLink - READ-ONLY; The continuation token for the next page of results. + NextLink *string `json:"nextLink,omitempty"` +} + +// ZoneListResultIterator provides access to a complete listing of Zone values. +type ZoneListResultIterator struct { + i int + page ZoneListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *ZoneListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ZoneListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *ZoneListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter ZoneListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter ZoneListResultIterator) Response() ZoneListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. 
+func (iter ZoneListResultIterator) Value() Zone { + if !iter.page.NotDone() { + return Zone{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the ZoneListResultIterator type. +func NewZoneListResultIterator(page ZoneListResultPage) ZoneListResultIterator { + return ZoneListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (zlr ZoneListResult) IsEmpty() bool { + return zlr.Value == nil || len(*zlr.Value) == 0 +} + +// zoneListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (zlr ZoneListResult) zoneListResultPreparer(ctx context.Context) (*http.Request, error) { + if zlr.NextLink == nil || len(to.String(zlr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(zlr.NextLink))) +} + +// ZoneListResultPage contains a page of Zone values. +type ZoneListResultPage struct { + fn func(context.Context, ZoneListResult) (ZoneListResult, error) + zlr ZoneListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *ZoneListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ZoneListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.zlr) + if err != nil { + return err + } + page.zlr = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. 
+func (page *ZoneListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page ZoneListResultPage) NotDone() bool { + return !page.zlr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page ZoneListResultPage) Response() ZoneListResult { + return page.zlr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page ZoneListResultPage) Values() []Zone { + if page.zlr.IsEmpty() { + return nil + } + return *page.zlr.Value +} + +// Creates a new instance of the ZoneListResultPage type. +func NewZoneListResultPage(getNextPage func(context.Context, ZoneListResult) (ZoneListResult, error)) ZoneListResultPage { + return ZoneListResultPage{fn: getNextPage} +} + +// ZoneProperties represents the properties of the zone. +type ZoneProperties struct { + // MaxNumberOfRecordSets - READ-ONLY; The maximum number of record sets that can be created in this DNS zone. This is a read-only property and any attempt to set this value will be ignored. + MaxNumberOfRecordSets *int64 `json:"maxNumberOfRecordSets,omitempty"` + // NumberOfRecordSets - READ-ONLY; The current number of record sets in this DNS zone. This is a read-only property and any attempt to set this value will be ignored. + NumberOfRecordSets *int64 `json:"numberOfRecordSets,omitempty"` + // NameServers - READ-ONLY; The name servers for this DNS zone. This is a read-only property and any attempt to set this value will be ignored. + NameServers *[]string `json:"nameServers,omitempty"` + // ZoneType - The type of this DNS zone (Public or Private). Possible values include: 'Public', 'Private' + ZoneType ZoneType `json:"zoneType,omitempty"` + // RegistrationVirtualNetworks - A list of references to virtual networks that register hostnames in this DNS zone. This is a only when ZoneType is Private. 
+ RegistrationVirtualNetworks *[]SubResource `json:"registrationVirtualNetworks,omitempty"` + // ResolutionVirtualNetworks - A list of references to virtual networks that resolve records in this DNS zone. This is a only when ZoneType is Private. + ResolutionVirtualNetworks *[]SubResource `json:"resolutionVirtualNetworks,omitempty"` +} + +// ZonesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running operation. +type ZonesDeleteFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *ZonesDeleteFuture) Result(client ZonesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "dns.ZonesDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("dns.ZonesDeleteFuture") + return + } + ar.Response = future.Response() + return +} + +// ZoneUpdate describes a request to update a DNS zone. +type ZoneUpdate struct { + // Tags - Resource tags. + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for ZoneUpdate. +func (zu ZoneUpdate) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if zu.Tags != nil { + objectMap["tags"] = zu.Tags + } + return json.Marshal(objectMap) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns/recordsets.go b/vendor/github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns/recordsets.go new file mode 100644 index 00000000000..a54a79fa17d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns/recordsets.go @@ -0,0 +1,779 @@ +package dns + +// Copyright (c) Microsoft and contributors. All rights reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// RecordSetsClient is the the DNS Management Client. +type RecordSetsClient struct { + BaseClient +} + +// NewRecordSetsClient creates an instance of the RecordSetsClient client. +func NewRecordSetsClient(subscriptionID string) RecordSetsClient { + return NewRecordSetsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewRecordSetsClientWithBaseURI creates an instance of the RecordSetsClient client. +func NewRecordSetsClientWithBaseURI(baseURI string, subscriptionID string) RecordSetsClient { + return RecordSetsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate creates or updates a record set within a DNS zone. +// Parameters: +// resourceGroupName - the name of the resource group. +// zoneName - the name of the DNS zone (without a terminating dot). +// relativeRecordSetName - the name of the record set, relative to the name of the zone. +// recordType - the type of DNS record in this record set. Record sets of type SOA can be updated but not +// created (they are created when the DNS zone is created). 
+// parameters - parameters supplied to the CreateOrUpdate operation. +// ifMatch - the etag of the record set. Omit this value to always overwrite the current record set. Specify +// the last-seen etag value to prevent accidentally overwriting any concurrent changes. +// ifNoneMatch - set to '*' to allow a new record set to be created, but to prevent updating an existing record +// set. Other values will be ignored. +func (client RecordSetsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, zoneName string, relativeRecordSetName string, recordType RecordType, parameters RecordSet, ifMatch string, ifNoneMatch string) (result RecordSet, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/RecordSetsClient.CreateOrUpdate") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, zoneName, relativeRecordSetName, recordType, parameters, ifMatch, ifNoneMatch) + if err != nil { + err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "CreateOrUpdate", resp, "Failure sending request") + return + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "CreateOrUpdate", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
+func (client RecordSetsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, zoneName string, relativeRecordSetName string, recordType RecordType, parameters RecordSet, ifMatch string, ifNoneMatch string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "recordType": autorest.Encode("path", recordType), + "relativeRecordSetName": relativeRecordSetName, + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "zoneName": autorest.Encode("path", zoneName), + } + + const APIVersion = "2018-05-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + parameters.ID = nil + parameters.Name = nil + parameters.Type = nil + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/dnsZones/{zoneName}/{recordType}/{relativeRecordSetName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + if len(ifMatch) > 0 { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Match", autorest.String(ifMatch))) + } + if len(ifNoneMatch) > 0 { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-None-Match", autorest.String(ifNoneMatch))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client RecordSetsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) 
+} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client RecordSetsClient) CreateOrUpdateResponder(resp *http.Response) (result RecordSet, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes a record set from a DNS zone. This operation cannot be undone. +// Parameters: +// resourceGroupName - the name of the resource group. +// zoneName - the name of the DNS zone (without a terminating dot). +// relativeRecordSetName - the name of the record set, relative to the name of the zone. +// recordType - the type of DNS record in this record set. Record sets of type SOA cannot be deleted (they are +// deleted when the DNS zone is deleted). +// ifMatch - the etag of the record set. Omit this value to always delete the current record set. Specify the +// last-seen etag value to prevent accidentally deleting any concurrent changes. 
+func (client RecordSetsClient) Delete(ctx context.Context, resourceGroupName string, zoneName string, relativeRecordSetName string, recordType RecordType, ifMatch string) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/RecordSetsClient.Delete") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.DeletePreparer(ctx, resourceGroupName, zoneName, relativeRecordSetName, recordType, ifMatch) + if err != nil { + err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. 
+func (client RecordSetsClient) DeletePreparer(ctx context.Context, resourceGroupName string, zoneName string, relativeRecordSetName string, recordType RecordType, ifMatch string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "recordType": autorest.Encode("path", recordType), + "relativeRecordSetName": relativeRecordSetName, + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "zoneName": autorest.Encode("path", zoneName), + } + + const APIVersion = "2018-05-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/dnsZones/{zoneName}/{recordType}/{relativeRecordSetName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + if len(ifMatch) > 0 { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Match", autorest.String(ifMatch))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client RecordSetsClient) DeleteSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. 
+func (client RecordSetsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets a record set. +// Parameters: +// resourceGroupName - the name of the resource group. +// zoneName - the name of the DNS zone (without a terminating dot). +// relativeRecordSetName - the name of the record set, relative to the name of the zone. +// recordType - the type of DNS record in this record set. +func (client RecordSetsClient) Get(ctx context.Context, resourceGroupName string, zoneName string, relativeRecordSetName string, recordType RecordType) (result RecordSet, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/RecordSetsClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetPreparer(ctx, resourceGroupName, zoneName, relativeRecordSetName, recordType) + if err != nil { + err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client RecordSetsClient) GetPreparer(ctx context.Context, resourceGroupName string, zoneName string, relativeRecordSetName string, recordType RecordType) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "recordType": autorest.Encode("path", recordType), + "relativeRecordSetName": relativeRecordSetName, + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "zoneName": autorest.Encode("path", zoneName), + } + + const APIVersion = "2018-05-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/dnsZones/{zoneName}/{recordType}/{relativeRecordSetName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client RecordSetsClient) GetSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client RecordSetsClient) GetResponder(resp *http.Response) (result RecordSet, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListAllByDNSZone lists all record sets in a DNS zone. +// Parameters: +// resourceGroupName - the name of the resource group. 
+// zoneName - the name of the DNS zone (without a terminating dot). +// top - the maximum number of record sets to return. If not specified, returns up to 100 record sets. +// recordSetNameSuffix - the suffix label of the record set name that has to be used to filter the record set +// enumerations. If this parameter is specified, Enumeration will return only records that end with +// . +func (client RecordSetsClient) ListAllByDNSZone(ctx context.Context, resourceGroupName string, zoneName string, top *int32, recordSetNameSuffix string) (result RecordSetListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/RecordSetsClient.ListAllByDNSZone") + defer func() { + sc := -1 + if result.rslr.Response.Response != nil { + sc = result.rslr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listAllByDNSZoneNextResults + req, err := client.ListAllByDNSZonePreparer(ctx, resourceGroupName, zoneName, top, recordSetNameSuffix) + if err != nil { + err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "ListAllByDNSZone", nil, "Failure preparing request") + return + } + + resp, err := client.ListAllByDNSZoneSender(req) + if err != nil { + result.rslr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "ListAllByDNSZone", resp, "Failure sending request") + return + } + + result.rslr, err = client.ListAllByDNSZoneResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "ListAllByDNSZone", resp, "Failure responding to request") + } + + return +} + +// ListAllByDNSZonePreparer prepares the ListAllByDNSZone request. 
+func (client RecordSetsClient) ListAllByDNSZonePreparer(ctx context.Context, resourceGroupName string, zoneName string, top *int32, recordSetNameSuffix string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "zoneName": autorest.Encode("path", zoneName), + } + + const APIVersion = "2018-05-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if top != nil { + queryParameters["$top"] = autorest.Encode("query", *top) + } + if len(recordSetNameSuffix) > 0 { + queryParameters["$recordsetnamesuffix"] = autorest.Encode("query", recordSetNameSuffix) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/dnsZones/{zoneName}/all", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListAllByDNSZoneSender sends the ListAllByDNSZone request. The method will close the +// http.Response Body if it receives an error. +func (client RecordSetsClient) ListAllByDNSZoneSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// ListAllByDNSZoneResponder handles the response to the ListAllByDNSZone request. The method always +// closes the http.Response Body. 
+func (client RecordSetsClient) ListAllByDNSZoneResponder(resp *http.Response) (result RecordSetListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listAllByDNSZoneNextResults retrieves the next set of results, if any. +func (client RecordSetsClient) listAllByDNSZoneNextResults(ctx context.Context, lastResults RecordSetListResult) (result RecordSetListResult, err error) { + req, err := lastResults.recordSetListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "dns.RecordSetsClient", "listAllByDNSZoneNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListAllByDNSZoneSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "dns.RecordSetsClient", "listAllByDNSZoneNextResults", resp, "Failure sending next results request") + } + result, err = client.ListAllByDNSZoneResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "listAllByDNSZoneNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListAllByDNSZoneComplete enumerates all values, automatically crossing page boundaries as required. 
+func (client RecordSetsClient) ListAllByDNSZoneComplete(ctx context.Context, resourceGroupName string, zoneName string, top *int32, recordSetNameSuffix string) (result RecordSetListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/RecordSetsClient.ListAllByDNSZone") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.ListAllByDNSZone(ctx, resourceGroupName, zoneName, top, recordSetNameSuffix) + return +} + +// ListByDNSZone lists all record sets in a DNS zone. +// Parameters: +// resourceGroupName - the name of the resource group. +// zoneName - the name of the DNS zone (without a terminating dot). +// top - the maximum number of record sets to return. If not specified, returns up to 100 record sets. +// recordsetnamesuffix - the suffix label of the record set name that has to be used to filter the record set +// enumerations. If this parameter is specified, Enumeration will return only records that end with +// . 
+func (client RecordSetsClient) ListByDNSZone(ctx context.Context, resourceGroupName string, zoneName string, top *int32, recordsetnamesuffix string) (result RecordSetListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/RecordSetsClient.ListByDNSZone") + defer func() { + sc := -1 + if result.rslr.Response.Response != nil { + sc = result.rslr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listByDNSZoneNextResults + req, err := client.ListByDNSZonePreparer(ctx, resourceGroupName, zoneName, top, recordsetnamesuffix) + if err != nil { + err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "ListByDNSZone", nil, "Failure preparing request") + return + } + + resp, err := client.ListByDNSZoneSender(req) + if err != nil { + result.rslr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "ListByDNSZone", resp, "Failure sending request") + return + } + + result.rslr, err = client.ListByDNSZoneResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "ListByDNSZone", resp, "Failure responding to request") + } + + return +} + +// ListByDNSZonePreparer prepares the ListByDNSZone request. 
+func (client RecordSetsClient) ListByDNSZonePreparer(ctx context.Context, resourceGroupName string, zoneName string, top *int32, recordsetnamesuffix string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "zoneName": autorest.Encode("path", zoneName), + } + + const APIVersion = "2018-05-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if top != nil { + queryParameters["$top"] = autorest.Encode("query", *top) + } + if len(recordsetnamesuffix) > 0 { + queryParameters["$recordsetnamesuffix"] = autorest.Encode("query", recordsetnamesuffix) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/dnsZones/{zoneName}/recordsets", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListByDNSZoneSender sends the ListByDNSZone request. The method will close the +// http.Response Body if it receives an error. +func (client RecordSetsClient) ListByDNSZoneSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// ListByDNSZoneResponder handles the response to the ListByDNSZone request. The method always +// closes the http.Response Body. 
+func (client RecordSetsClient) ListByDNSZoneResponder(resp *http.Response) (result RecordSetListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listByDNSZoneNextResults retrieves the next set of results, if any. +func (client RecordSetsClient) listByDNSZoneNextResults(ctx context.Context, lastResults RecordSetListResult) (result RecordSetListResult, err error) { + req, err := lastResults.recordSetListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "dns.RecordSetsClient", "listByDNSZoneNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListByDNSZoneSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "dns.RecordSetsClient", "listByDNSZoneNextResults", resp, "Failure sending next results request") + } + result, err = client.ListByDNSZoneResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "listByDNSZoneNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListByDNSZoneComplete enumerates all values, automatically crossing page boundaries as required. 
+func (client RecordSetsClient) ListByDNSZoneComplete(ctx context.Context, resourceGroupName string, zoneName string, top *int32, recordsetnamesuffix string) (result RecordSetListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/RecordSetsClient.ListByDNSZone") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.ListByDNSZone(ctx, resourceGroupName, zoneName, top, recordsetnamesuffix) + return +} + +// ListByType lists the record sets of a specified type in a DNS zone. +// Parameters: +// resourceGroupName - the name of the resource group. +// zoneName - the name of the DNS zone (without a terminating dot). +// recordType - the type of record sets to enumerate. +// top - the maximum number of record sets to return. If not specified, returns up to 100 record sets. +// recordsetnamesuffix - the suffix label of the record set name that has to be used to filter the record set +// enumerations. If this parameter is specified, Enumeration will return only records that end with +// . 
+func (client RecordSetsClient) ListByType(ctx context.Context, resourceGroupName string, zoneName string, recordType RecordType, top *int32, recordsetnamesuffix string) (result RecordSetListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/RecordSetsClient.ListByType") + defer func() { + sc := -1 + if result.rslr.Response.Response != nil { + sc = result.rslr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listByTypeNextResults + req, err := client.ListByTypePreparer(ctx, resourceGroupName, zoneName, recordType, top, recordsetnamesuffix) + if err != nil { + err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "ListByType", nil, "Failure preparing request") + return + } + + resp, err := client.ListByTypeSender(req) + if err != nil { + result.rslr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "ListByType", resp, "Failure sending request") + return + } + + result.rslr, err = client.ListByTypeResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "ListByType", resp, "Failure responding to request") + } + + return +} + +// ListByTypePreparer prepares the ListByType request. 
+func (client RecordSetsClient) ListByTypePreparer(ctx context.Context, resourceGroupName string, zoneName string, recordType RecordType, top *int32, recordsetnamesuffix string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "recordType": autorest.Encode("path", recordType), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "zoneName": autorest.Encode("path", zoneName), + } + + const APIVersion = "2018-05-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if top != nil { + queryParameters["$top"] = autorest.Encode("query", *top) + } + if len(recordsetnamesuffix) > 0 { + queryParameters["$recordsetnamesuffix"] = autorest.Encode("query", recordsetnamesuffix) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/dnsZones/{zoneName}/{recordType}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListByTypeSender sends the ListByType request. The method will close the +// http.Response Body if it receives an error. +func (client RecordSetsClient) ListByTypeSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// ListByTypeResponder handles the response to the ListByType request. The method always +// closes the http.Response Body. 
+func (client RecordSetsClient) ListByTypeResponder(resp *http.Response) (result RecordSetListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listByTypeNextResults retrieves the next set of results, if any. +func (client RecordSetsClient) listByTypeNextResults(ctx context.Context, lastResults RecordSetListResult) (result RecordSetListResult, err error) { + req, err := lastResults.recordSetListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "dns.RecordSetsClient", "listByTypeNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListByTypeSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "dns.RecordSetsClient", "listByTypeNextResults", resp, "Failure sending next results request") + } + result, err = client.ListByTypeResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "listByTypeNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListByTypeComplete enumerates all values, automatically crossing page boundaries as required. 
+func (client RecordSetsClient) ListByTypeComplete(ctx context.Context, resourceGroupName string, zoneName string, recordType RecordType, top *int32, recordsetnamesuffix string) (result RecordSetListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/RecordSetsClient.ListByType") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.ListByType(ctx, resourceGroupName, zoneName, recordType, top, recordsetnamesuffix) + return +} + +// Update updates a record set within a DNS zone. +// Parameters: +// resourceGroupName - the name of the resource group. +// zoneName - the name of the DNS zone (without a terminating dot). +// relativeRecordSetName - the name of the record set, relative to the name of the zone. +// recordType - the type of DNS record in this record set. +// parameters - parameters supplied to the Update operation. +// ifMatch - the etag of the record set. Omit this value to always overwrite the current record set. Specify +// the last-seen etag value to prevent accidentally overwriting concurrent changes. 
+func (client RecordSetsClient) Update(ctx context.Context, resourceGroupName string, zoneName string, relativeRecordSetName string, recordType RecordType, parameters RecordSet, ifMatch string) (result RecordSet, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/RecordSetsClient.Update") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.UpdatePreparer(ctx, resourceGroupName, zoneName, relativeRecordSetName, recordType, parameters, ifMatch) + if err != nil { + err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "Update", nil, "Failure preparing request") + return + } + + resp, err := client.UpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "Update", resp, "Failure sending request") + return + } + + result, err = client.UpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "dns.RecordSetsClient", "Update", resp, "Failure responding to request") + } + + return +} + +// UpdatePreparer prepares the Update request. 
+func (client RecordSetsClient) UpdatePreparer(ctx context.Context, resourceGroupName string, zoneName string, relativeRecordSetName string, recordType RecordType, parameters RecordSet, ifMatch string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "recordType": autorest.Encode("path", recordType), + "relativeRecordSetName": relativeRecordSetName, + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "zoneName": autorest.Encode("path", zoneName), + } + + const APIVersion = "2018-05-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + parameters.ID = nil + parameters.Name = nil + parameters.Type = nil + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/dnsZones/{zoneName}/{recordType}/{relativeRecordSetName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + if len(ifMatch) > 0 { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Match", autorest.String(ifMatch))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. +func (client RecordSetsClient) UpdateSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. 
+func (client RecordSetsClient) UpdateResponder(resp *http.Response) (result RecordSet, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns/resourcereference.go b/vendor/github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns/resourcereference.go new file mode 100644 index 00000000000..35f01ca41b5 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns/resourcereference.go @@ -0,0 +1,117 @@ +package dns + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// ResourceReferenceClient is the the DNS Management Client. +type ResourceReferenceClient struct { + BaseClient +} + +// NewResourceReferenceClient creates an instance of the ResourceReferenceClient client. 
+func NewResourceReferenceClient(subscriptionID string) ResourceReferenceClient { + return NewResourceReferenceClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewResourceReferenceClientWithBaseURI creates an instance of the ResourceReferenceClient client. +func NewResourceReferenceClientWithBaseURI(baseURI string, subscriptionID string) ResourceReferenceClient { + return ResourceReferenceClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// GetByTargetResources returns the DNS records specified by the referencing targetResourceIds. +// Parameters: +// parameters - properties for dns resource reference request. +func (client ResourceReferenceClient) GetByTargetResources(ctx context.Context, parameters ResourceReferenceRequest) (result ResourceReferenceResult, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ResourceReferenceClient.GetByTargetResources") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetByTargetResourcesPreparer(ctx, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "dns.ResourceReferenceClient", "GetByTargetResources", nil, "Failure preparing request") + return + } + + resp, err := client.GetByTargetResourcesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "dns.ResourceReferenceClient", "GetByTargetResources", resp, "Failure sending request") + return + } + + result, err = client.GetByTargetResourcesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "dns.ResourceReferenceClient", "GetByTargetResources", resp, "Failure responding to request") + } + + return +} + +// GetByTargetResourcesPreparer prepares the GetByTargetResources request. 
+func (client ResourceReferenceClient) GetByTargetResourcesPreparer(ctx context.Context, parameters ResourceReferenceRequest) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-05-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Network/getDnsResourceReference", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetByTargetResourcesSender sends the GetByTargetResources request. The method will close the +// http.Response Body if it receives an error. +func (client ResourceReferenceClient) GetByTargetResourcesSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// GetByTargetResourcesResponder handles the response to the GetByTargetResources request. The method always +// closes the http.Response Body. 
+func (client ResourceReferenceClient) GetByTargetResourcesResponder(resp *http.Response) (result ResourceReferenceResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns/version.go b/vendor/github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns/version.go new file mode 100644 index 00000000000..7762623de8c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns/version.go @@ -0,0 +1,30 @@ +package dns + +import "github.com/Azure/azure-sdk-for-go/version" + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +// UserAgent returns the UserAgent string to use when sending http.Requests. +func UserAgent() string { + return "Azure-SDK-For-Go/" + version.Number + " dns/2018-05-01" +} + +// Version returns the semantic version (see http://semver.org) of the client. 
+func Version() string { + return version.Number +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns/zones.go b/vendor/github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns/zones.go new file mode 100644 index 00000000000..de1ede07511 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/dns/mgmt/2018-05-01/dns/zones.go @@ -0,0 +1,611 @@ +package dns + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// ZonesClient is the the DNS Management Client. +type ZonesClient struct { + BaseClient +} + +// NewZonesClient creates an instance of the ZonesClient client. +func NewZonesClient(subscriptionID string) ZonesClient { + return NewZonesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewZonesClientWithBaseURI creates an instance of the ZonesClient client. +func NewZonesClientWithBaseURI(baseURI string, subscriptionID string) ZonesClient { + return ZonesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate creates or updates a DNS zone. 
Does not modify DNS records within the zone. +// Parameters: +// resourceGroupName - the name of the resource group. +// zoneName - the name of the DNS zone (without a terminating dot). +// parameters - parameters supplied to the CreateOrUpdate operation. +// ifMatch - the etag of the DNS zone. Omit this value to always overwrite the current zone. Specify the +// last-seen etag value to prevent accidentally overwriting any concurrent changes. +// ifNoneMatch - set to '*' to allow a new DNS zone to be created, but to prevent updating an existing zone. +// Other values will be ignored. +func (client ZonesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, zoneName string, parameters Zone, ifMatch string, ifNoneMatch string) (result Zone, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ZonesClient.CreateOrUpdate") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, zoneName, parameters, ifMatch, ifNoneMatch) + if err != nil { + err = autorest.NewErrorWithError(err, "dns.ZonesClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "dns.ZonesClient", "CreateOrUpdate", resp, "Failure sending request") + return + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "dns.ZonesClient", "CreateOrUpdate", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
+func (client ZonesClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, zoneName string, parameters Zone, ifMatch string, ifNoneMatch string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "zoneName": autorest.Encode("path", zoneName), + } + + const APIVersion = "2018-05-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/dnsZones/{zoneName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + if len(ifMatch) > 0 { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Match", autorest.String(ifMatch))) + } + if len(ifNoneMatch) > 0 { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-None-Match", autorest.String(ifNoneMatch))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client ZonesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. 
+func (client ZonesClient) CreateOrUpdateResponder(resp *http.Response) (result Zone, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes a DNS zone. WARNING: All DNS records in the zone will also be deleted. This operation cannot be +// undone. +// Parameters: +// resourceGroupName - the name of the resource group. +// zoneName - the name of the DNS zone (without a terminating dot). +// ifMatch - the etag of the DNS zone. Omit this value to always delete the current zone. Specify the last-seen +// etag value to prevent accidentally deleting any concurrent changes. +func (client ZonesClient) Delete(ctx context.Context, resourceGroupName string, zoneName string, ifMatch string) (result ZonesDeleteFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ZonesClient.Delete") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.DeletePreparer(ctx, resourceGroupName, zoneName, ifMatch) + if err != nil { + err = autorest.NewErrorWithError(err, "dns.ZonesClient", "Delete", nil, "Failure preparing request") + return + } + + result, err = client.DeleteSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "dns.ZonesClient", "Delete", result.Response(), "Failure sending request") + return + } + + return +} + +// DeletePreparer prepares the Delete request. 
+func (client ZonesClient) DeletePreparer(ctx context.Context, resourceGroupName string, zoneName string, ifMatch string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "zoneName": autorest.Encode("path", zoneName), + } + + const APIVersion = "2018-05-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/dnsZones/{zoneName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + if len(ifMatch) > 0 { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Match", autorest.String(ifMatch))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client ZonesClient) DeleteSender(req *http.Request) (future ZonesDeleteFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + var resp *http.Response + resp, err = autorest.SendWithSender(client, req, sd...) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client ZonesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets a DNS zone. 
Retrieves the zone properties, but not the record sets within the zone. +// Parameters: +// resourceGroupName - the name of the resource group. +// zoneName - the name of the DNS zone (without a terminating dot). +func (client ZonesClient) Get(ctx context.Context, resourceGroupName string, zoneName string) (result Zone, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ZonesClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetPreparer(ctx, resourceGroupName, zoneName) + if err != nil { + err = autorest.NewErrorWithError(err, "dns.ZonesClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "dns.ZonesClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "dns.ZonesClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client ZonesClient) GetPreparer(ctx context.Context, resourceGroupName string, zoneName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "zoneName": autorest.Encode("path", zoneName), + } + + const APIVersion = "2018-05-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/dnsZones/{zoneName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client ZonesClient) GetSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client ZonesClient) GetResponder(resp *http.Response) (result Zone, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List lists the DNS zones in all resource groups in a subscription. +// Parameters: +// top - the maximum number of DNS zones to return. If not specified, returns up to 100 zones. 
+func (client ZonesClient) List(ctx context.Context, top *int32) (result ZoneListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ZonesClient.List") + defer func() { + sc := -1 + if result.zlr.Response.Response != nil { + sc = result.zlr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx, top) + if err != nil { + err = autorest.NewErrorWithError(err, "dns.ZonesClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.zlr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "dns.ZonesClient", "List", resp, "Failure sending request") + return + } + + result.zlr, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "dns.ZonesClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client ZonesClient) ListPreparer(ctx context.Context, top *int32) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-05-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if top != nil { + queryParameters["$top"] = autorest.Encode("query", *top) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Network/dnszones", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. 
+func (client ZonesClient) ListSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client ZonesClient) ListResponder(resp *http.Response) (result ZoneListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. +func (client ZonesClient) listNextResults(ctx context.Context, lastResults ZoneListResult) (result ZoneListResult, err error) { + req, err := lastResults.zoneListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "dns.ZonesClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "dns.ZonesClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "dns.ZonesClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. 
+func (client ZonesClient) ListComplete(ctx context.Context, top *int32) (result ZoneListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ZonesClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx, top) + return +} + +// ListByResourceGroup lists the DNS zones within a resource group. +// Parameters: +// resourceGroupName - the name of the resource group. +// top - the maximum number of record sets to return. If not specified, returns up to 100 record sets. +func (client ZonesClient) ListByResourceGroup(ctx context.Context, resourceGroupName string, top *int32) (result ZoneListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ZonesClient.ListByResourceGroup") + defer func() { + sc := -1 + if result.zlr.Response.Response != nil { + sc = result.zlr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listByResourceGroupNextResults + req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName, top) + if err != nil { + err = autorest.NewErrorWithError(err, "dns.ZonesClient", "ListByResourceGroup", nil, "Failure preparing request") + return + } + + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.zlr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "dns.ZonesClient", "ListByResourceGroup", resp, "Failure sending request") + return + } + + result.zlr, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "dns.ZonesClient", "ListByResourceGroup", resp, "Failure responding to request") + } + + return +} + +// ListByResourceGroupPreparer prepares the ListByResourceGroup request. 
+func (client ZonesClient) ListByResourceGroupPreparer(ctx context.Context, resourceGroupName string, top *int32) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-05-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if top != nil { + queryParameters["$top"] = autorest.Encode("query", *top) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/dnsZones", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the +// http.Response Body if it receives an error. +func (client ZonesClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always +// closes the http.Response Body. +func (client ZonesClient) ListByResourceGroupResponder(resp *http.Response) (result ZoneListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listByResourceGroupNextResults retrieves the next set of results, if any. 
+func (client ZonesClient) listByResourceGroupNextResults(ctx context.Context, lastResults ZoneListResult) (result ZoneListResult, err error) { + req, err := lastResults.zoneListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "dns.ZonesClient", "listByResourceGroupNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "dns.ZonesClient", "listByResourceGroupNextResults", resp, "Failure sending next results request") + } + result, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "dns.ZonesClient", "listByResourceGroupNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListByResourceGroupComplete enumerates all values, automatically crossing page boundaries as required. +func (client ZonesClient) ListByResourceGroupComplete(ctx context.Context, resourceGroupName string, top *int32) (result ZoneListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ZonesClient.ListByResourceGroup") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.ListByResourceGroup(ctx, resourceGroupName, top) + return +} + +// Update updates a DNS zone. Does not modify DNS records within the zone. +// Parameters: +// resourceGroupName - the name of the resource group. +// zoneName - the name of the DNS zone (without a terminating dot). +// parameters - parameters supplied to the Update operation. +// ifMatch - the etag of the DNS zone. Omit this value to always overwrite the current zone. 
Specify the +// last-seen etag value to prevent accidentally overwriting any concurrent changes. +func (client ZonesClient) Update(ctx context.Context, resourceGroupName string, zoneName string, parameters ZoneUpdate, ifMatch string) (result Zone, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ZonesClient.Update") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.UpdatePreparer(ctx, resourceGroupName, zoneName, parameters, ifMatch) + if err != nil { + err = autorest.NewErrorWithError(err, "dns.ZonesClient", "Update", nil, "Failure preparing request") + return + } + + resp, err := client.UpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "dns.ZonesClient", "Update", resp, "Failure sending request") + return + } + + result, err = client.UpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "dns.ZonesClient", "Update", resp, "Failure responding to request") + } + + return +} + +// UpdatePreparer prepares the Update request. 
+func (client ZonesClient) UpdatePreparer(ctx context.Context, resourceGroupName string, zoneName string, parameters ZoneUpdate, ifMatch string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "zoneName": autorest.Encode("path", zoneName), + } + + const APIVersion = "2018-05-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/dnsZones/{zoneName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + if len(ifMatch) > 0 { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithHeader("If-Match", autorest.String(ifMatch))) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. +func (client ZonesClient) UpdateSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. 
+func (client ZonesClient) UpdateResponder(resp *http.Response) (result Zone, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/version/version.go b/vendor/github.com/Azure/azure-sdk-for-go/version/version.go new file mode 100644 index 00000000000..b7139293081 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/version/version.go @@ -0,0 +1,21 @@ +package version + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +// Number contains the semantic version of this SDK. +const Number = "v32.6.0" diff --git a/vendor/github.com/Azure/go-autorest/autorest/LICENSE b/vendor/github.com/Azure/go-autorest/autorest/LICENSE new file mode 100644 index 00000000000..b9d6a27ea92 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/LICENSE b/vendor/github.com/Azure/go-autorest/autorest/adal/LICENSE new file mode 100644 index 00000000000..b9d6a27ea92 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/README.md b/vendor/github.com/Azure/go-autorest/autorest/adal/README.md new file mode 100644 index 00000000000..fec416a9c41 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/README.md @@ -0,0 +1,292 @@ +# Azure Active Directory authentication for Go + +This is a standalone package for authenticating with Azure Active +Directory from other Go libraries and applications, in particular the [Azure SDK +for Go](https://github.com/Azure/azure-sdk-for-go). + +Note: Despite the package's name it is not related to other "ADAL" libraries +maintained in the [github.com/AzureAD](https://github.com/AzureAD) org. Issues +should be opened in [this repo's](https://github.com/Azure/go-autorest/issues) +or [the SDK's](https://github.com/Azure/azure-sdk-for-go/issues) issue +trackers. + +## Install + +```bash +go get -u github.com/Azure/go-autorest/autorest/adal +``` + +## Usage + +An Active Directory application is required in order to use this library. An application can be registered in the [Azure Portal](https://portal.azure.com/) by following these [guidelines](https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-integrating-applications) or using the [Azure CLI](https://github.com/Azure/azure-cli). + +### Register an Azure AD Application with secret + + +1. Register a new application with a `secret` credential + + ``` + az ad app create \ + --display-name example-app \ + --homepage https://example-app/home \ + --identifier-uris https://example-app/app \ + --password secret + ``` + +2. Create a service principal using the `Application ID` from previous step + + ``` + az ad sp create --id "Application ID" + ``` + + * Replace `Application ID` with `appId` from step 1. 
+ +### Register an Azure AD Application with certificate + +1. Create a private key + + ``` + openssl genrsa -out "example-app.key" 2048 + ``` + +2. Create the certificate + + ``` + openssl req -new -key "example-app.key" -subj "/CN=example-app" -out "example-app.csr" + openssl x509 -req -in "example-app.csr" -signkey "example-app.key" -out "example-app.crt" -days 10000 + ``` + +3. Create the PKCS12 version of the certificate containing also the private key + + ``` + openssl pkcs12 -export -out "example-app.pfx" -inkey "example-app.key" -in "example-app.crt" -passout pass: + + ``` + +4. Register a new application with the certificate content form `example-app.crt` + + ``` + certificateContents="$(tail -n+2 "example-app.crt" | head -n-1)" + + az ad app create \ + --display-name example-app \ + --homepage https://example-app/home \ + --identifier-uris https://example-app/app \ + --key-usage Verify --end-date 2018-01-01 \ + --key-value "${certificateContents}" + ``` + +5. Create a service principal using the `Application ID` from previous step + + ``` + az ad sp create --id "APPLICATION_ID" + ``` + + * Replace `APPLICATION_ID` with `appId` from step 4. + + +### Grant the necessary permissions + +Azure relies on a Role-Based Access Control (RBAC) model to manage the access to resources at a fine-grained +level. There is a set of [pre-defined roles](https://docs.microsoft.com/en-us/azure/active-directory/role-based-access-built-in-roles) +which can be assigned to a service principal of an Azure AD application depending of your needs. + +``` +az role assignment create --assigner "SERVICE_PRINCIPAL_ID" --role "ROLE_NAME" +``` + +* Replace the `SERVICE_PRINCIPAL_ID` with the `appId` from previous step. +* Replace the `ROLE_NAME` with a role name of your choice. + +It is also possible to define custom role definitions. 
+ +``` +az role definition create --role-definition role-definition.json +``` + +* Check [custom roles](https://docs.microsoft.com/en-us/azure/active-directory/role-based-access-control-custom-roles) for more details regarding the content of `role-definition.json` file. + + +### Acquire Access Token + +The common configuration used by all flows: + +```Go +const activeDirectoryEndpoint = "https://login.microsoftonline.com/" +tenantID := "TENANT_ID" +oauthConfig, err := adal.NewOAuthConfig(activeDirectoryEndpoint, tenantID) + +applicationID := "APPLICATION_ID" + +callback := func(token adal.Token) error { + // This is called after the token is acquired +} + +// The resource for which the token is acquired +resource := "https://management.core.windows.net/" +``` + +* Replace the `TENANT_ID` with your tenant ID. +* Replace the `APPLICATION_ID` with the value from previous section. + +#### Client Credentials + +```Go +applicationSecret := "APPLICATION_SECRET" + +spt, err := adal.NewServicePrincipalToken( + *oauthConfig, + appliationID, + applicationSecret, + resource, + callbacks...) +if err != nil { + return nil, err +} + +// Acquire a new access token +err = spt.Refresh() +if (err == nil) { + token := spt.Token +} +``` + +* Replace the `APPLICATION_SECRET` with the `password` value from previous section. + +#### Client Certificate + +```Go +certificatePath := "./example-app.pfx" + +certData, err := ioutil.ReadFile(certificatePath) +if err != nil { + return nil, fmt.Errorf("failed to read the certificate file (%s): %v", certificatePath, err) +} + +// Get the certificate and private key from pfx file +certificate, rsaPrivateKey, err := decodePkcs12(certData, "") +if err != nil { + return nil, fmt.Errorf("failed to decode pkcs12 certificate while creating spt: %v", err) +} + +spt, err := adal.NewServicePrincipalTokenFromCertificate( + *oauthConfig, + applicationID, + certificate, + rsaPrivateKey, + resource, + callbacks...) 
+ +// Acquire a new access token +err = spt.Refresh() +if (err == nil) { + token := spt.Token +} +``` + +* Update the certificate path to point to the example-app.pfx file which was created in previous section. + + +#### Device Code + +```Go +oauthClient := &http.Client{} + +// Acquire the device code +deviceCode, err := adal.InitiateDeviceAuth( + oauthClient, + *oauthConfig, + applicationID, + resource) +if err != nil { + return nil, fmt.Errorf("Failed to start device auth flow: %s", err) +} + +// Display the authentication message +fmt.Println(*deviceCode.Message) + +// Wait here until the user is authenticated +token, err := adal.WaitForUserCompletion(oauthClient, deviceCode) +if err != nil { + return nil, fmt.Errorf("Failed to finish device auth flow: %s", err) +} + +spt, err := adal.NewServicePrincipalTokenFromManualToken( + *oauthConfig, + applicationID, + resource, + *token, + callbacks...) + +if (err == nil) { + token := spt.Token +} +``` + +#### Username password authenticate + +```Go +spt, err := adal.NewServicePrincipalTokenFromUsernamePassword( + *oauthConfig, + applicationID, + username, + password, + resource, + callbacks...) + +if (err == nil) { + token := spt.Token +} +``` + +#### Authorization code authenticate + +``` Go +spt, err := adal.NewServicePrincipalTokenFromAuthorizationCode( + *oauthConfig, + applicationID, + clientSecret, + authorizationCode, + redirectURI, + resource, + callbacks...) + +err = spt.Refresh() +if (err == nil) { + token := spt.Token +} +``` + +### Command Line Tool + +A command line tool is available in `cmd/adal.go` that can acquire a token for a given resource. It supports all flows mentioned above. 
+ +``` +adal -h + +Usage of ./adal: + -applicationId string + application id + -certificatePath string + path to pk12/PFC application certificate + -mode string + authentication mode (device, secret, cert, refresh) (default "device") + -resource string + resource for which the token is requested + -secret string + application secret + -tenantId string + tenant id + -tokenCachePath string + location of oath token cache (default "/home/cgc/.adal/accessToken.json") +``` + +Example acquire a token for `https://management.core.windows.net/` using device code flow: + +``` +adal -mode device \ + -applicationId "APPLICATION_ID" \ + -tenantId "TENANT_ID" \ + -resource https://management.core.windows.net/ + +``` diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/config.go b/vendor/github.com/Azure/go-autorest/autorest/adal/config.go new file mode 100644 index 00000000000..fa5964742fc --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/config.go @@ -0,0 +1,151 @@ +package adal + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +import ( + "errors" + "fmt" + "net/url" +) + +const ( + activeDirectoryEndpointTemplate = "%s/oauth2/%s%s" +) + +// OAuthConfig represents the endpoints needed +// in OAuth operations +type OAuthConfig struct { + AuthorityEndpoint url.URL `json:"authorityEndpoint"` + AuthorizeEndpoint url.URL `json:"authorizeEndpoint"` + TokenEndpoint url.URL `json:"tokenEndpoint"` + DeviceCodeEndpoint url.URL `json:"deviceCodeEndpoint"` +} + +// IsZero returns true if the OAuthConfig object is zero-initialized. +func (oac OAuthConfig) IsZero() bool { + return oac == OAuthConfig{} +} + +func validateStringParam(param, name string) error { + if len(param) == 0 { + return fmt.Errorf("parameter '" + name + "' cannot be empty") + } + return nil +} + +// NewOAuthConfig returns an OAuthConfig with tenant specific urls +func NewOAuthConfig(activeDirectoryEndpoint, tenantID string) (*OAuthConfig, error) { + apiVer := "1.0" + return NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, tenantID, &apiVer) +} + +// NewOAuthConfigWithAPIVersion returns an OAuthConfig with tenant specific urls. +// If apiVersion is not nil the "api-version" query parameter will be appended to the endpoint URLs with the specified value. 
+func NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, tenantID string, apiVersion *string) (*OAuthConfig, error) { + if err := validateStringParam(activeDirectoryEndpoint, "activeDirectoryEndpoint"); err != nil { + return nil, err + } + api := "" + // it's legal for tenantID to be empty so don't validate it + if apiVersion != nil { + if err := validateStringParam(*apiVersion, "apiVersion"); err != nil { + return nil, err + } + api = fmt.Sprintf("?api-version=%s", *apiVersion) + } + u, err := url.Parse(activeDirectoryEndpoint) + if err != nil { + return nil, err + } + authorityURL, err := u.Parse(tenantID) + if err != nil { + return nil, err + } + authorizeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "authorize", api)) + if err != nil { + return nil, err + } + tokenURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "token", api)) + if err != nil { + return nil, err + } + deviceCodeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "devicecode", api)) + if err != nil { + return nil, err + } + + return &OAuthConfig{ + AuthorityEndpoint: *authorityURL, + AuthorizeEndpoint: *authorizeURL, + TokenEndpoint: *tokenURL, + DeviceCodeEndpoint: *deviceCodeURL, + }, nil +} + +// MultiTenantOAuthConfig provides endpoints for primary and aulixiary tenant IDs. +type MultiTenantOAuthConfig interface { + PrimaryTenant() *OAuthConfig + AuxiliaryTenants() []*OAuthConfig +} + +// OAuthOptions contains optional OAuthConfig creation arguments. +type OAuthOptions struct { + APIVersion string +} + +func (c OAuthOptions) apiVersion() string { + if c.APIVersion != "" { + return fmt.Sprintf("?api-version=%s", c.APIVersion) + } + return "1.0" +} + +// NewMultiTenantOAuthConfig creates an object that support multitenant OAuth configuration. +// See https://docs.microsoft.com/en-us/azure/azure-resource-manager/authenticate-multi-tenant for more information. 
+func NewMultiTenantOAuthConfig(activeDirectoryEndpoint, primaryTenantID string, auxiliaryTenantIDs []string, options OAuthOptions) (MultiTenantOAuthConfig, error) { + if len(auxiliaryTenantIDs) == 0 || len(auxiliaryTenantIDs) > 3 { + return nil, errors.New("must specify one to three auxiliary tenants") + } + mtCfg := multiTenantOAuthConfig{ + cfgs: make([]*OAuthConfig, len(auxiliaryTenantIDs)+1), + } + apiVer := options.apiVersion() + pri, err := NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, primaryTenantID, &apiVer) + if err != nil { + return nil, fmt.Errorf("failed to create OAuthConfig for primary tenant: %v", err) + } + mtCfg.cfgs[0] = pri + for i := range auxiliaryTenantIDs { + aux, err := NewOAuthConfig(activeDirectoryEndpoint, auxiliaryTenantIDs[i]) + if err != nil { + return nil, fmt.Errorf("failed to create OAuthConfig for tenant '%s': %v", auxiliaryTenantIDs[i], err) + } + mtCfg.cfgs[i+1] = aux + } + return mtCfg, nil +} + +type multiTenantOAuthConfig struct { + // first config in the slice is the primary tenant + cfgs []*OAuthConfig +} + +func (m multiTenantOAuthConfig) PrimaryTenant() *OAuthConfig { + return m.cfgs[0] +} + +func (m multiTenantOAuthConfig) AuxiliaryTenants() []*OAuthConfig { + return m.cfgs[1:] +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go b/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go new file mode 100644 index 00000000000..914f8af5e4e --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go @@ -0,0 +1,269 @@ +package adal + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* + This file is largely based on rjw57/oauth2device's code, with the follow differences: + * scope -> resource, and only allow a single one + * receive "Message" in the DeviceCode struct and show it to users as the prompt + * azure-xplat-cli has the following behavior that this emulates: + - does not send client_secret during the token exchange + - sends resource again in the token exchange request +*/ + +import ( + "context" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "strings" + "time" +) + +const ( + logPrefix = "autorest/adal/devicetoken:" +) + +var ( + // ErrDeviceGeneric represents an unknown error from the token endpoint when using device flow + ErrDeviceGeneric = fmt.Errorf("%s Error while retrieving OAuth token: Unknown Error", logPrefix) + + // ErrDeviceAccessDenied represents an access denied error from the token endpoint when using device flow + ErrDeviceAccessDenied = fmt.Errorf("%s Error while retrieving OAuth token: Access Denied", logPrefix) + + // ErrDeviceAuthorizationPending represents the server waiting on the user to complete the device flow + ErrDeviceAuthorizationPending = fmt.Errorf("%s Error while retrieving OAuth token: Authorization Pending", logPrefix) + + // ErrDeviceCodeExpired represents the server timing out and expiring the code during device flow + ErrDeviceCodeExpired = fmt.Errorf("%s Error while retrieving OAuth token: Code Expired", logPrefix) + + // ErrDeviceSlowDown represents the service telling us we're polling too often during device flow + ErrDeviceSlowDown = fmt.Errorf("%s 
Error while retrieving OAuth token: Slow Down", logPrefix) + + // ErrDeviceCodeEmpty represents an empty device code from the device endpoint while using device flow + ErrDeviceCodeEmpty = fmt.Errorf("%s Error while retrieving device code: Device Code Empty", logPrefix) + + // ErrOAuthTokenEmpty represents an empty OAuth token from the token endpoint when using device flow + ErrOAuthTokenEmpty = fmt.Errorf("%s Error while retrieving OAuth token: Token Empty", logPrefix) + + errCodeSendingFails = "Error occurred while sending request for Device Authorization Code" + errCodeHandlingFails = "Error occurred while handling response from the Device Endpoint" + errTokenSendingFails = "Error occurred while sending request with device code for a token" + errTokenHandlingFails = "Error occurred while handling response from the Token Endpoint (during device flow)" + errStatusNotOK = "Error HTTP status != 200" +) + +// DeviceCode is the object returned by the device auth endpoint +// It contains information to instruct the user to complete the auth flow +type DeviceCode struct { + DeviceCode *string `json:"device_code,omitempty"` + UserCode *string `json:"user_code,omitempty"` + VerificationURL *string `json:"verification_url,omitempty"` + ExpiresIn *int64 `json:"expires_in,string,omitempty"` + Interval *int64 `json:"interval,string,omitempty"` + + Message *string `json:"message"` // Azure specific + Resource string // store the following, stored when initiating, used when exchanging + OAuthConfig OAuthConfig + ClientID string +} + +// TokenError is the object returned by the token exchange endpoint +// when something is amiss +type TokenError struct { + Error *string `json:"error,omitempty"` + ErrorCodes []int `json:"error_codes,omitempty"` + ErrorDescription *string `json:"error_description,omitempty"` + Timestamp *string `json:"timestamp,omitempty"` + TraceID *string `json:"trace_id,omitempty"` +} + +// DeviceToken is the object return by the token exchange endpoint +// It 
can either look like a Token or an ErrorToken, so put both here +// and check for presence of "Error" to know if we are in error state +type deviceToken struct { + Token + TokenError +} + +// InitiateDeviceAuth initiates a device auth flow. It returns a DeviceCode +// that can be used with CheckForUserCompletion or WaitForUserCompletion. +// Deprecated: use InitiateDeviceAuthWithContext() instead. +func InitiateDeviceAuth(sender Sender, oauthConfig OAuthConfig, clientID, resource string) (*DeviceCode, error) { + return InitiateDeviceAuthWithContext(context.Background(), sender, oauthConfig, clientID, resource) +} + +// InitiateDeviceAuthWithContext initiates a device auth flow. It returns a DeviceCode +// that can be used with CheckForUserCompletion or WaitForUserCompletion. +func InitiateDeviceAuthWithContext(ctx context.Context, sender Sender, oauthConfig OAuthConfig, clientID, resource string) (*DeviceCode, error) { + v := url.Values{ + "client_id": []string{clientID}, + "resource": []string{resource}, + } + + s := v.Encode() + body := ioutil.NopCloser(strings.NewReader(s)) + + req, err := http.NewRequest(http.MethodPost, oauthConfig.DeviceCodeEndpoint.String(), body) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeSendingFails, err.Error()) + } + + req.ContentLength = int64(len(s)) + req.Header.Set(contentType, mimeTypeFormPost) + resp, err := sender.Do(req.WithContext(ctx)) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeSendingFails, err.Error()) + } + defer resp.Body.Close() + + rb, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, err.Error()) + } + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, errStatusNotOK) + } + + if len(strings.Trim(string(rb), " ")) == 0 { + return nil, ErrDeviceCodeEmpty + } + + var code DeviceCode + err = json.Unmarshal(rb, &code) + if err != nil 
{ + return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, err.Error()) + } + + code.ClientID = clientID + code.Resource = resource + code.OAuthConfig = oauthConfig + + return &code, nil +} + +// CheckForUserCompletion takes a DeviceCode and checks with the Azure AD OAuth endpoint +// to see if the device flow has: been completed, timed out, or otherwise failed +// Deprecated: use CheckForUserCompletionWithContext() instead. +func CheckForUserCompletion(sender Sender, code *DeviceCode) (*Token, error) { + return CheckForUserCompletionWithContext(context.Background(), sender, code) +} + +// CheckForUserCompletionWithContext takes a DeviceCode and checks with the Azure AD OAuth endpoint +// to see if the device flow has: been completed, timed out, or otherwise failed +func CheckForUserCompletionWithContext(ctx context.Context, sender Sender, code *DeviceCode) (*Token, error) { + v := url.Values{ + "client_id": []string{code.ClientID}, + "code": []string{*code.DeviceCode}, + "grant_type": []string{OAuthGrantTypeDeviceCode}, + "resource": []string{code.Resource}, + } + + s := v.Encode() + body := ioutil.NopCloser(strings.NewReader(s)) + + req, err := http.NewRequest(http.MethodPost, code.OAuthConfig.TokenEndpoint.String(), body) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenSendingFails, err.Error()) + } + + req.ContentLength = int64(len(s)) + req.Header.Set(contentType, mimeTypeFormPost) + resp, err := sender.Do(req.WithContext(ctx)) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenSendingFails, err.Error()) + } + defer resp.Body.Close() + + rb, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, err.Error()) + } + + if resp.StatusCode != http.StatusOK && len(strings.Trim(string(rb), " ")) == 0 { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, errStatusNotOK) + } + if len(strings.Trim(string(rb), " ")) == 0 
{ + return nil, ErrOAuthTokenEmpty + } + + var token deviceToken + err = json.Unmarshal(rb, &token) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, err.Error()) + } + + if token.Error == nil { + return &token.Token, nil + } + + switch *token.Error { + case "authorization_pending": + return nil, ErrDeviceAuthorizationPending + case "slow_down": + return nil, ErrDeviceSlowDown + case "access_denied": + return nil, ErrDeviceAccessDenied + case "code_expired": + return nil, ErrDeviceCodeExpired + default: + return nil, ErrDeviceGeneric + } +} + +// WaitForUserCompletion calls CheckForUserCompletion repeatedly until a token is granted or an error state occurs. +// This prevents the user from looping and checking against 'ErrDeviceAuthorizationPending'. +// Deprecated: use WaitForUserCompletionWithContext() instead. +func WaitForUserCompletion(sender Sender, code *DeviceCode) (*Token, error) { + return WaitForUserCompletionWithContext(context.Background(), sender, code) +} + +// WaitForUserCompletionWithContext calls CheckForUserCompletion repeatedly until a token is granted or an error +// state occurs. This prevents the user from looping and checking against 'ErrDeviceAuthorizationPending'. +func WaitForUserCompletionWithContext(ctx context.Context, sender Sender, code *DeviceCode) (*Token, error) { + intervalDuration := time.Duration(*code.Interval) * time.Second + waitDuration := intervalDuration + + for { + token, err := CheckForUserCompletionWithContext(ctx, sender, code) + + if err == nil { + return token, nil + } + + switch err { + case ErrDeviceSlowDown: + waitDuration += waitDuration + case ErrDeviceAuthorizationPending: + // noop + default: // everything else is "fatal" to us + return nil, err + } + + if waitDuration > (intervalDuration * 3) { + return nil, fmt.Errorf("%s Error waiting for user to complete device flow. 
Server told us to slow_down too much", logPrefix) + } + + select { + case <-time.After(waitDuration): + // noop + case <-ctx.Done(): + return nil, ctx.Err() + } + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/go.mod b/vendor/github.com/Azure/go-autorest/autorest/adal/go.mod new file mode 100644 index 00000000000..fdc5b90ca5c --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/go.mod @@ -0,0 +1,12 @@ +module github.com/Azure/go-autorest/autorest/adal + +go 1.12 + +require ( + github.com/Azure/go-autorest/autorest v0.9.0 + github.com/Azure/go-autorest/autorest/date v0.2.0 + github.com/Azure/go-autorest/autorest/mocks v0.3.0 + github.com/Azure/go-autorest/tracing v0.5.0 + github.com/dgrijalva/jwt-go v3.2.0+incompatible + golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 +) diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/go.sum b/vendor/github.com/Azure/go-autorest/autorest/adal/go.sum new file mode 100644 index 00000000000..f0a018563b5 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/go.sum @@ -0,0 +1,23 @@ +github.com/Azure/go-autorest/autorest v0.9.0 h1:MRvx8gncNaXJqOoLmhNjUAKh33JJF8LyxPhomEtOsjs= +github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= +github.com/Azure/go-autorest/autorest/date v0.1.0 h1:YGrhWfrgtFs84+h0o46rJrlmsZtyZRg470CqAXTZaGM= +github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/date v0.2.0 h1:yW+Zlqf26583pE43KhfnhFcdmSWlm5Ew6bxipnr/tbM= +github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= 
+github.com/Azure/go-autorest/autorest/mocks v0.1.0 h1:Kx+AUU2Te+A3JIyYn6Dfs+cFgx5XorQKuIXrZGoq/SI= +github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.2.0 h1:Ww5g4zThfD/6cLb4z6xxgeyDa7QDkizMkJKe0ysZXp0= +github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.3.0 h1:qJumjCaCudz+OcqE9/XtEPfvtOjOmKaui4EOpFI6zZc= +github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= +github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY= +github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= +github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k= +github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= +github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/adal/go_mod_tidy_hack.go new file mode 100644 index 00000000000..28a4bfc4c43 --- /dev/null +++ 
b/vendor/github.com/Azure/go-autorest/autorest/adal/go_mod_tidy_hack.go @@ -0,0 +1,24 @@ +// +build modhack + +package adal + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file, and the github.com/Azure/go-autorest/autorest import, won't actually become part of +// the resultant binary. + +// Necessary for safely adding multi-module repo. +// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository +import _ "github.com/Azure/go-autorest/autorest" diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go b/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go new file mode 100644 index 00000000000..9e15f2751f2 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go @@ -0,0 +1,73 @@ +package adal + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path/filepath" +) + +// LoadToken restores a Token object from a file located at 'path'. +func LoadToken(path string) (*Token, error) { + file, err := os.Open(path) + if err != nil { + return nil, fmt.Errorf("failed to open file (%s) while loading token: %v", path, err) + } + defer file.Close() + + var token Token + + dec := json.NewDecoder(file) + if err = dec.Decode(&token); err != nil { + return nil, fmt.Errorf("failed to decode contents of file (%s) into Token representation: %v", path, err) + } + return &token, nil +} + +// SaveToken persists an oauth token at the given location on disk. +// It moves the new file into place so it can safely be used to replace an existing file +// that maybe accessed by multiple processes. +func SaveToken(path string, mode os.FileMode, token Token) error { + dir := filepath.Dir(path) + err := os.MkdirAll(dir, os.ModePerm) + if err != nil { + return fmt.Errorf("failed to create directory (%s) to store token in: %v", dir, err) + } + + newFile, err := ioutil.TempFile(dir, "token") + if err != nil { + return fmt.Errorf("failed to create the temp file to write the token: %v", err) + } + tempPath := newFile.Name() + + if err := json.NewEncoder(newFile).Encode(token); err != nil { + return fmt.Errorf("failed to encode token to file (%s) while saving token: %v", tempPath, err) + } + if err := newFile.Close(); err != nil { + return fmt.Errorf("failed to close temp file %s: %v", tempPath, err) + } + + // Atomic replace to avoid multi-writer file corruptions + if err := os.Rename(tempPath, path); err != nil { + return fmt.Errorf("failed to move temporary token to desired output location. 
src=%s dst=%s: %v", tempPath, path, err) + } + if err := os.Chmod(path, mode); err != nil { + return fmt.Errorf("failed to chmod the token file %s: %v", path, err) + } + return nil +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go b/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go new file mode 100644 index 00000000000..d7e4372bbc5 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go @@ -0,0 +1,95 @@ +package adal + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "crypto/tls" + "net/http" + "net/http/cookiejar" + "sync" + + "github.com/Azure/go-autorest/tracing" +) + +const ( + contentType = "Content-Type" + mimeTypeFormPost = "application/x-www-form-urlencoded" +) + +var defaultSender Sender +var defaultSenderInit = &sync.Once{} + +// Sender is the interface that wraps the Do method to send HTTP requests. +// +// The standard http.Client conforms to this interface. +type Sender interface { + Do(*http.Request) (*http.Response, error) +} + +// SenderFunc is a method that implements the Sender interface. +type SenderFunc func(*http.Request) (*http.Response, error) + +// Do implements the Sender interface on SenderFunc. +func (sf SenderFunc) Do(r *http.Request) (*http.Response, error) { + return sf(r) +} + +// SendDecorator takes and possibly decorates, by wrapping, a Sender. 
Decorators may affect the +// http.Request and pass it along or, first, pass the http.Request along then react to the +// http.Response result. +type SendDecorator func(Sender) Sender + +// CreateSender creates, decorates, and returns, as a Sender, the default http.Client. +func CreateSender(decorators ...SendDecorator) Sender { + return DecorateSender(sender(), decorators...) +} + +// DecorateSender accepts a Sender and a, possibly empty, set of SendDecorators, which is applies to +// the Sender. Decorators are applied in the order received, but their affect upon the request +// depends on whether they are a pre-decorator (change the http.Request and then pass it along) or a +// post-decorator (pass the http.Request along and react to the results in http.Response). +func DecorateSender(s Sender, decorators ...SendDecorator) Sender { + for _, decorate := range decorators { + s = decorate(s) + } + return s +} + +func sender() Sender { + // note that we can't init defaultSender in init() since it will + // execute before calling code has had a chance to enable tracing + defaultSenderInit.Do(func() { + // Use behaviour compatible with DefaultTransport, but require TLS minimum version. 
+ defaultTransport := http.DefaultTransport.(*http.Transport) + transport := &http.Transport{ + Proxy: defaultTransport.Proxy, + DialContext: defaultTransport.DialContext, + MaxIdleConns: defaultTransport.MaxIdleConns, + IdleConnTimeout: defaultTransport.IdleConnTimeout, + TLSHandshakeTimeout: defaultTransport.TLSHandshakeTimeout, + ExpectContinueTimeout: defaultTransport.ExpectContinueTimeout, + TLSClientConfig: &tls.Config{ + MinVersion: tls.VersionTLS12, + }, + } + var roundTripper http.RoundTripper = transport + if tracing.IsEnabled() { + roundTripper = tracing.NewTransport(transport) + } + j, _ := cookiejar.New(nil) + defaultSender = &http.Client{Jar: j, Transport: roundTripper} + }) + return defaultSender +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/token.go b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go new file mode 100644 index 00000000000..7c7fca3718f --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go @@ -0,0 +1,1112 @@ +package adal + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +import ( + "context" + "crypto/rand" + "crypto/rsa" + "crypto/sha1" + "crypto/x509" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "math" + "net/http" + "net/url" + "os" + "strings" + "sync" + "time" + + "github.com/Azure/go-autorest/autorest/date" + "github.com/dgrijalva/jwt-go" +) + +const ( + defaultRefresh = 5 * time.Minute + + // OAuthGrantTypeDeviceCode is the "grant_type" identifier used in device flow + OAuthGrantTypeDeviceCode = "device_code" + + // OAuthGrantTypeClientCredentials is the "grant_type" identifier used in credential flows + OAuthGrantTypeClientCredentials = "client_credentials" + + // OAuthGrantTypeUserPass is the "grant_type" identifier used in username and password auth flows + OAuthGrantTypeUserPass = "password" + + // OAuthGrantTypeRefreshToken is the "grant_type" identifier used in refresh token flows + OAuthGrantTypeRefreshToken = "refresh_token" + + // OAuthGrantTypeAuthorizationCode is the "grant_type" identifier used in authorization code flows + OAuthGrantTypeAuthorizationCode = "authorization_code" + + // metadataHeader is the header required by MSI extension + metadataHeader = "Metadata" + + // msiEndpoint is the well known endpoint for getting MSI authentications tokens + msiEndpoint = "http://169.254.169.254/metadata/identity/oauth2/token" + + // the default number of attempts to refresh an MSI authentication token + defaultMaxMSIRefreshAttempts = 5 + + // asMSIEndpointEnv is the environment variable used to store the endpoint on App Service and Functions + asMSIEndpointEnv = "MSI_ENDPOINT" + + // asMSISecretEnv is the environment variable used to store the request secret on App Service and Functions + asMSISecretEnv = "MSI_SECRET" +) + +// OAuthTokenProvider is an interface which should be implemented by an access token retriever +type OAuthTokenProvider interface { + OAuthToken() string +} + +// MultitenantOAuthTokenProvider provides tokens used for multi-tenant authorization. 
+type MultitenantOAuthTokenProvider interface { + PrimaryOAuthToken() string + AuxiliaryOAuthTokens() []string +} + +// TokenRefreshError is an interface used by errors returned during token refresh. +type TokenRefreshError interface { + error + Response() *http.Response +} + +// Refresher is an interface for token refresh functionality +type Refresher interface { + Refresh() error + RefreshExchange(resource string) error + EnsureFresh() error +} + +// RefresherWithContext is an interface for token refresh functionality +type RefresherWithContext interface { + RefreshWithContext(ctx context.Context) error + RefreshExchangeWithContext(ctx context.Context, resource string) error + EnsureFreshWithContext(ctx context.Context) error +} + +// TokenRefreshCallback is the type representing callbacks that will be called after +// a successful token refresh +type TokenRefreshCallback func(Token) error + +// Token encapsulates the access token used to authorize Azure requests. +// https://docs.microsoft.com/en-us/azure/active-directory/develop/v1-oauth2-client-creds-grant-flow#service-to-service-access-token-response +type Token struct { + AccessToken string `json:"access_token"` + RefreshToken string `json:"refresh_token"` + + ExpiresIn json.Number `json:"expires_in"` + ExpiresOn json.Number `json:"expires_on"` + NotBefore json.Number `json:"not_before"` + + Resource string `json:"resource"` + Type string `json:"token_type"` +} + +func newToken() Token { + return Token{ + ExpiresIn: "0", + ExpiresOn: "0", + NotBefore: "0", + } +} + +// IsZero returns true if the token object is zero-initialized. +func (t Token) IsZero() bool { + return t == Token{} +} + +// Expires returns the time.Time when the Token expires. +func (t Token) Expires() time.Time { + s, err := t.ExpiresOn.Float64() + if err != nil { + s = -3600 + } + + expiration := date.NewUnixTimeFromSeconds(s) + + return time.Time(expiration).UTC() +} + +// IsExpired returns true if the Token is expired, false otherwise. 
+func (t Token) IsExpired() bool { + return t.WillExpireIn(0) +} + +// WillExpireIn returns true if the Token will expire after the passed time.Duration interval +// from now, false otherwise. +func (t Token) WillExpireIn(d time.Duration) bool { + return !t.Expires().After(time.Now().Add(d)) +} + +//OAuthToken return the current access token +func (t *Token) OAuthToken() string { + return t.AccessToken +} + +// ServicePrincipalSecret is an interface that allows various secret mechanism to fill the form +// that is submitted when acquiring an oAuth token. +type ServicePrincipalSecret interface { + SetAuthenticationValues(spt *ServicePrincipalToken, values *url.Values) error +} + +// ServicePrincipalNoSecret represents a secret type that contains no secret +// meaning it is not valid for fetching a fresh token. This is used by Manual +type ServicePrincipalNoSecret struct { +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret +// It only returns an error for the ServicePrincipalNoSecret type +func (noSecret *ServicePrincipalNoSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + return fmt.Errorf("Manually created ServicePrincipalToken does not contain secret material to retrieve a new access token") +} + +// MarshalJSON implements the json.Marshaler interface. +func (noSecret ServicePrincipalNoSecret) MarshalJSON() ([]byte, error) { + type tokenType struct { + Type string `json:"type"` + } + return json.Marshal(tokenType{ + Type: "ServicePrincipalNoSecret", + }) +} + +// ServicePrincipalTokenSecret implements ServicePrincipalSecret for client_secret type authorization. +type ServicePrincipalTokenSecret struct { + ClientSecret string `json:"value"` +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. +// It will populate the form submitted during oAuth Token Acquisition using the client_secret. 
+func (tokenSecret *ServicePrincipalTokenSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + v.Set("client_secret", tokenSecret.ClientSecret) + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (tokenSecret ServicePrincipalTokenSecret) MarshalJSON() ([]byte, error) { + type tokenType struct { + Type string `json:"type"` + Value string `json:"value"` + } + return json.Marshal(tokenType{ + Type: "ServicePrincipalTokenSecret", + Value: tokenSecret.ClientSecret, + }) +} + +// ServicePrincipalCertificateSecret implements ServicePrincipalSecret for generic RSA cert auth with signed JWTs. +type ServicePrincipalCertificateSecret struct { + Certificate *x509.Certificate + PrivateKey *rsa.PrivateKey +} + +// SignJwt returns the JWT signed with the certificate's private key. +func (secret *ServicePrincipalCertificateSecret) SignJwt(spt *ServicePrincipalToken) (string, error) { + hasher := sha1.New() + _, err := hasher.Write(secret.Certificate.Raw) + if err != nil { + return "", err + } + + thumbprint := base64.URLEncoding.EncodeToString(hasher.Sum(nil)) + + // The jti (JWT ID) claim provides a unique identifier for the JWT. + jti := make([]byte, 20) + _, err = rand.Read(jti) + if err != nil { + return "", err + } + + token := jwt.New(jwt.SigningMethodRS256) + token.Header["x5t"] = thumbprint + x5c := []string{base64.StdEncoding.EncodeToString(secret.Certificate.Raw)} + token.Header["x5c"] = x5c + token.Claims = jwt.MapClaims{ + "aud": spt.inner.OauthConfig.TokenEndpoint.String(), + "iss": spt.inner.ClientID, + "sub": spt.inner.ClientID, + "jti": base64.URLEncoding.EncodeToString(jti), + "nbf": time.Now().Unix(), + "exp": time.Now().Add(time.Hour * 24).Unix(), + } + + signedString, err := token.SignedString(secret.PrivateKey) + return signedString, err +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. 
+// It will populate the form submitted during oAuth Token Acquisition using a JWT signed with a certificate. +func (secret *ServicePrincipalCertificateSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + jwt, err := secret.SignJwt(spt) + if err != nil { + return err + } + + v.Set("client_assertion", jwt) + v.Set("client_assertion_type", "urn:ietf:params:oauth:client-assertion-type:jwt-bearer") + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (secret ServicePrincipalCertificateSecret) MarshalJSON() ([]byte, error) { + return nil, errors.New("marshalling ServicePrincipalCertificateSecret is not supported") +} + +// ServicePrincipalMSISecret implements ServicePrincipalSecret for machines running the MSI Extension. +type ServicePrincipalMSISecret struct { +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. +func (msiSecret *ServicePrincipalMSISecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (msiSecret ServicePrincipalMSISecret) MarshalJSON() ([]byte, error) { + return nil, errors.New("marshalling ServicePrincipalMSISecret is not supported") +} + +// ServicePrincipalUsernamePasswordSecret implements ServicePrincipalSecret for username and password auth. +type ServicePrincipalUsernamePasswordSecret struct { + Username string `json:"username"` + Password string `json:"password"` +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. +func (secret *ServicePrincipalUsernamePasswordSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + v.Set("username", secret.Username) + v.Set("password", secret.Password) + return nil +} + +// MarshalJSON implements the json.Marshaler interface. 
+func (secret ServicePrincipalUsernamePasswordSecret) MarshalJSON() ([]byte, error) { + type tokenType struct { + Type string `json:"type"` + Username string `json:"username"` + Password string `json:"password"` + } + return json.Marshal(tokenType{ + Type: "ServicePrincipalUsernamePasswordSecret", + Username: secret.Username, + Password: secret.Password, + }) +} + +// ServicePrincipalAuthorizationCodeSecret implements ServicePrincipalSecret for authorization code auth. +type ServicePrincipalAuthorizationCodeSecret struct { + ClientSecret string `json:"value"` + AuthorizationCode string `json:"authCode"` + RedirectURI string `json:"redirect"` +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. +func (secret *ServicePrincipalAuthorizationCodeSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + v.Set("code", secret.AuthorizationCode) + v.Set("client_secret", secret.ClientSecret) + v.Set("redirect_uri", secret.RedirectURI) + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (secret ServicePrincipalAuthorizationCodeSecret) MarshalJSON() ([]byte, error) { + type tokenType struct { + Type string `json:"type"` + Value string `json:"value"` + AuthCode string `json:"authCode"` + Redirect string `json:"redirect"` + } + return json.Marshal(tokenType{ + Type: "ServicePrincipalAuthorizationCodeSecret", + Value: secret.ClientSecret, + AuthCode: secret.AuthorizationCode, + Redirect: secret.RedirectURI, + }) +} + +// ServicePrincipalToken encapsulates a Token created for a Service Principal. +type ServicePrincipalToken struct { + inner servicePrincipalToken + refreshLock *sync.RWMutex + sender Sender + refreshCallbacks []TokenRefreshCallback + // MaxMSIRefreshAttempts is the maximum number of attempts to refresh an MSI token. + MaxMSIRefreshAttempts int +} + +// MarshalTokenJSON returns the marshalled inner token. 
+func (spt ServicePrincipalToken) MarshalTokenJSON() ([]byte, error) { + return json.Marshal(spt.inner.Token) +} + +// SetRefreshCallbacks replaces any existing refresh callbacks with the specified callbacks. +func (spt *ServicePrincipalToken) SetRefreshCallbacks(callbacks []TokenRefreshCallback) { + spt.refreshCallbacks = callbacks +} + +// MarshalJSON implements the json.Marshaler interface. +func (spt ServicePrincipalToken) MarshalJSON() ([]byte, error) { + return json.Marshal(spt.inner) +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (spt *ServicePrincipalToken) UnmarshalJSON(data []byte) error { + // need to determine the token type + raw := map[string]interface{}{} + err := json.Unmarshal(data, &raw) + if err != nil { + return err + } + secret := raw["secret"].(map[string]interface{}) + switch secret["type"] { + case "ServicePrincipalNoSecret": + spt.inner.Secret = &ServicePrincipalNoSecret{} + case "ServicePrincipalTokenSecret": + spt.inner.Secret = &ServicePrincipalTokenSecret{} + case "ServicePrincipalCertificateSecret": + return errors.New("unmarshalling ServicePrincipalCertificateSecret is not supported") + case "ServicePrincipalMSISecret": + return errors.New("unmarshalling ServicePrincipalMSISecret is not supported") + case "ServicePrincipalUsernamePasswordSecret": + spt.inner.Secret = &ServicePrincipalUsernamePasswordSecret{} + case "ServicePrincipalAuthorizationCodeSecret": + spt.inner.Secret = &ServicePrincipalAuthorizationCodeSecret{} + default: + return fmt.Errorf("unrecognized token type '%s'", secret["type"]) + } + err = json.Unmarshal(data, &spt.inner) + if err != nil { + return err + } + // Don't override the refreshLock or the sender if those have been already set. 
+ if spt.refreshLock == nil { + spt.refreshLock = &sync.RWMutex{} + } + if spt.sender == nil { + spt.sender = sender() + } + return nil +} + +// internal type used for marshalling/unmarshalling +type servicePrincipalToken struct { + Token Token `json:"token"` + Secret ServicePrincipalSecret `json:"secret"` + OauthConfig OAuthConfig `json:"oauth"` + ClientID string `json:"clientID"` + Resource string `json:"resource"` + AutoRefresh bool `json:"autoRefresh"` + RefreshWithin time.Duration `json:"refreshWithin"` +} + +func validateOAuthConfig(oac OAuthConfig) error { + if oac.IsZero() { + return fmt.Errorf("parameter 'oauthConfig' cannot be zero-initialized") + } + return nil +} + +// NewServicePrincipalTokenWithSecret create a ServicePrincipalToken using the supplied ServicePrincipalSecret implementation. +func NewServicePrincipalTokenWithSecret(oauthConfig OAuthConfig, id string, resource string, secret ServicePrincipalSecret, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + if err := validateOAuthConfig(oauthConfig); err != nil { + return nil, err + } + if err := validateStringParam(id, "id"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + if secret == nil { + return nil, fmt.Errorf("parameter 'secret' cannot be nil") + } + spt := &ServicePrincipalToken{ + inner: servicePrincipalToken{ + Token: newToken(), + OauthConfig: oauthConfig, + Secret: secret, + ClientID: id, + Resource: resource, + AutoRefresh: true, + RefreshWithin: defaultRefresh, + }, + refreshLock: &sync.RWMutex{}, + sender: sender(), + refreshCallbacks: callbacks, + } + return spt, nil +} + +// NewServicePrincipalTokenFromManualToken creates a ServicePrincipalToken using the supplied token +func NewServicePrincipalTokenFromManualToken(oauthConfig OAuthConfig, clientID string, resource string, token Token, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + if err := 
validateOAuthConfig(oauthConfig); err != nil { + return nil, err + } + if err := validateStringParam(clientID, "clientID"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + if token.IsZero() { + return nil, fmt.Errorf("parameter 'token' cannot be zero-initialized") + } + spt, err := NewServicePrincipalTokenWithSecret( + oauthConfig, + clientID, + resource, + &ServicePrincipalNoSecret{}, + callbacks...) + if err != nil { + return nil, err + } + + spt.inner.Token = token + + return spt, nil +} + +// NewServicePrincipalTokenFromManualTokenSecret creates a ServicePrincipalToken using the supplied token and secret +func NewServicePrincipalTokenFromManualTokenSecret(oauthConfig OAuthConfig, clientID string, resource string, token Token, secret ServicePrincipalSecret, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + if err := validateOAuthConfig(oauthConfig); err != nil { + return nil, err + } + if err := validateStringParam(clientID, "clientID"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + if secret == nil { + return nil, fmt.Errorf("parameter 'secret' cannot be nil") + } + if token.IsZero() { + return nil, fmt.Errorf("parameter 'token' cannot be zero-initialized") + } + spt, err := NewServicePrincipalTokenWithSecret( + oauthConfig, + clientID, + resource, + secret, + callbacks...) + if err != nil { + return nil, err + } + + spt.inner.Token = token + + return spt, nil +} + +// NewServicePrincipalToken creates a ServicePrincipalToken from the supplied Service Principal +// credentials scoped to the named resource. 
+func NewServicePrincipalToken(oauthConfig OAuthConfig, clientID string, secret string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + if err := validateOAuthConfig(oauthConfig); err != nil { + return nil, err + } + if err := validateStringParam(clientID, "clientID"); err != nil { + return nil, err + } + if err := validateStringParam(secret, "secret"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + return NewServicePrincipalTokenWithSecret( + oauthConfig, + clientID, + resource, + &ServicePrincipalTokenSecret{ + ClientSecret: secret, + }, + callbacks..., + ) +} + +// NewServicePrincipalTokenFromCertificate creates a ServicePrincipalToken from the supplied pkcs12 bytes. +func NewServicePrincipalTokenFromCertificate(oauthConfig OAuthConfig, clientID string, certificate *x509.Certificate, privateKey *rsa.PrivateKey, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + if err := validateOAuthConfig(oauthConfig); err != nil { + return nil, err + } + if err := validateStringParam(clientID, "clientID"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + if certificate == nil { + return nil, fmt.Errorf("parameter 'certificate' cannot be nil") + } + if privateKey == nil { + return nil, fmt.Errorf("parameter 'privateKey' cannot be nil") + } + return NewServicePrincipalTokenWithSecret( + oauthConfig, + clientID, + resource, + &ServicePrincipalCertificateSecret{ + PrivateKey: privateKey, + Certificate: certificate, + }, + callbacks..., + ) +} + +// NewServicePrincipalTokenFromUsernamePassword creates a ServicePrincipalToken from the username and password. 
+func NewServicePrincipalTokenFromUsernamePassword(oauthConfig OAuthConfig, clientID string, username string, password string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + if err := validateOAuthConfig(oauthConfig); err != nil { + return nil, err + } + if err := validateStringParam(clientID, "clientID"); err != nil { + return nil, err + } + if err := validateStringParam(username, "username"); err != nil { + return nil, err + } + if err := validateStringParam(password, "password"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + return NewServicePrincipalTokenWithSecret( + oauthConfig, + clientID, + resource, + &ServicePrincipalUsernamePasswordSecret{ + Username: username, + Password: password, + }, + callbacks..., + ) +} + +// NewServicePrincipalTokenFromAuthorizationCode creates a ServicePrincipalToken from the +func NewServicePrincipalTokenFromAuthorizationCode(oauthConfig OAuthConfig, clientID string, clientSecret string, authorizationCode string, redirectURI string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + + if err := validateOAuthConfig(oauthConfig); err != nil { + return nil, err + } + if err := validateStringParam(clientID, "clientID"); err != nil { + return nil, err + } + if err := validateStringParam(clientSecret, "clientSecret"); err != nil { + return nil, err + } + if err := validateStringParam(authorizationCode, "authorizationCode"); err != nil { + return nil, err + } + if err := validateStringParam(redirectURI, "redirectURI"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + + return NewServicePrincipalTokenWithSecret( + oauthConfig, + clientID, + resource, + &ServicePrincipalAuthorizationCodeSecret{ + ClientSecret: clientSecret, + AuthorizationCode: authorizationCode, + RedirectURI: redirectURI, + }, + 
callbacks..., + ) +} + +// GetMSIVMEndpoint gets the MSI endpoint on Virtual Machines. +func GetMSIVMEndpoint() (string, error) { + return msiEndpoint, nil +} + +func isAppService() bool { + _, asMSIEndpointEnvExists := os.LookupEnv(asMSIEndpointEnv) + _, asMSISecretEnvExists := os.LookupEnv(asMSISecretEnv) + + return asMSIEndpointEnvExists && asMSISecretEnvExists +} + +// GetMSIAppServiceEndpoint get the MSI endpoint for App Service and Functions +func GetMSIAppServiceEndpoint() (string, error) { + asMSIEndpoint, asMSIEndpointEnvExists := os.LookupEnv(asMSIEndpointEnv) + + if asMSIEndpointEnvExists { + return asMSIEndpoint, nil + } + return "", errors.New("MSI endpoint not found") +} + +// GetMSIEndpoint get the appropriate MSI endpoint depending on the runtime environment +func GetMSIEndpoint() (string, error) { + if isAppService() { + return GetMSIAppServiceEndpoint() + } + return GetMSIVMEndpoint() +} + +// NewServicePrincipalTokenFromMSI creates a ServicePrincipalToken via the MSI VM Extension. +// It will use the system assigned identity when creating the token. +func NewServicePrincipalTokenFromMSI(msiEndpoint, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + return newServicePrincipalTokenFromMSI(msiEndpoint, resource, nil, callbacks...) +} + +// NewServicePrincipalTokenFromMSIWithUserAssignedID creates a ServicePrincipalToken via the MSI VM Extension. +// It will use the specified user assigned identity when creating the token. +func NewServicePrincipalTokenFromMSIWithUserAssignedID(msiEndpoint, resource string, userAssignedID string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + return newServicePrincipalTokenFromMSI(msiEndpoint, resource, &userAssignedID, callbacks...) 
+} + +func newServicePrincipalTokenFromMSI(msiEndpoint, resource string, userAssignedID *string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + if err := validateStringParam(msiEndpoint, "msiEndpoint"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + if userAssignedID != nil { + if err := validateStringParam(*userAssignedID, "userAssignedID"); err != nil { + return nil, err + } + } + // We set the oauth config token endpoint to be MSI's endpoint + msiEndpointURL, err := url.Parse(msiEndpoint) + if err != nil { + return nil, err + } + + v := url.Values{} + v.Set("resource", resource) + // App Service MSI currently only supports token API version 2017-09-01 + if isAppService() { + v.Set("api-version", "2017-09-01") + } else { + v.Set("api-version", "2018-02-01") + } + if userAssignedID != nil { + v.Set("client_id", *userAssignedID) + } + msiEndpointURL.RawQuery = v.Encode() + + spt := &ServicePrincipalToken{ + inner: servicePrincipalToken{ + Token: newToken(), + OauthConfig: OAuthConfig{ + TokenEndpoint: *msiEndpointURL, + }, + Secret: &ServicePrincipalMSISecret{}, + Resource: resource, + AutoRefresh: true, + RefreshWithin: defaultRefresh, + }, + refreshLock: &sync.RWMutex{}, + sender: sender(), + refreshCallbacks: callbacks, + MaxMSIRefreshAttempts: defaultMaxMSIRefreshAttempts, + } + + if userAssignedID != nil { + spt.inner.ClientID = *userAssignedID + } + + return spt, nil +} + +// internal type that implements TokenRefreshError +type tokenRefreshError struct { + message string + resp *http.Response +} + +// Error implements the error interface which is part of the TokenRefreshError interface. +func (tre tokenRefreshError) Error() string { + return tre.message +} + +// Response implements the TokenRefreshError interface, it returns the raw HTTP response from the refresh operation. 
+func (tre tokenRefreshError) Response() *http.Response { + return tre.resp +} + +func newTokenRefreshError(message string, resp *http.Response) TokenRefreshError { + return tokenRefreshError{message: message, resp: resp} +} + +// EnsureFresh will refresh the token if it will expire within the refresh window (as set by +// RefreshWithin) and autoRefresh flag is on. This method is safe for concurrent use. +func (spt *ServicePrincipalToken) EnsureFresh() error { + return spt.EnsureFreshWithContext(context.Background()) +} + +// EnsureFreshWithContext will refresh the token if it will expire within the refresh window (as set by +// RefreshWithin) and autoRefresh flag is on. This method is safe for concurrent use. +func (spt *ServicePrincipalToken) EnsureFreshWithContext(ctx context.Context) error { + if spt.inner.AutoRefresh && spt.inner.Token.WillExpireIn(spt.inner.RefreshWithin) { + // take the write lock then check to see if the token was already refreshed + spt.refreshLock.Lock() + defer spt.refreshLock.Unlock() + if spt.inner.Token.WillExpireIn(spt.inner.RefreshWithin) { + return spt.refreshInternal(ctx, spt.inner.Resource) + } + } + return nil +} + +// InvokeRefreshCallbacks calls any TokenRefreshCallbacks that were added to the SPT during initialization +func (spt *ServicePrincipalToken) InvokeRefreshCallbacks(token Token) error { + if spt.refreshCallbacks != nil { + for _, callback := range spt.refreshCallbacks { + err := callback(spt.inner.Token) + if err != nil { + return fmt.Errorf("adal: TokenRefreshCallback handler failed. Error = '%v'", err) + } + } + } + return nil +} + +// Refresh obtains a fresh token for the Service Principal. +// This method is not safe for concurrent use and should be syncrhonized. +func (spt *ServicePrincipalToken) Refresh() error { + return spt.RefreshWithContext(context.Background()) +} + +// RefreshWithContext obtains a fresh token for the Service Principal. 
+// This method is not safe for concurrent use and should be syncrhonized. +func (spt *ServicePrincipalToken) RefreshWithContext(ctx context.Context) error { + spt.refreshLock.Lock() + defer spt.refreshLock.Unlock() + return spt.refreshInternal(ctx, spt.inner.Resource) +} + +// RefreshExchange refreshes the token, but for a different resource. +// This method is not safe for concurrent use and should be syncrhonized. +func (spt *ServicePrincipalToken) RefreshExchange(resource string) error { + return spt.RefreshExchangeWithContext(context.Background(), resource) +} + +// RefreshExchangeWithContext refreshes the token, but for a different resource. +// This method is not safe for concurrent use and should be syncrhonized. +func (spt *ServicePrincipalToken) RefreshExchangeWithContext(ctx context.Context, resource string) error { + spt.refreshLock.Lock() + defer spt.refreshLock.Unlock() + return spt.refreshInternal(ctx, resource) +} + +func (spt *ServicePrincipalToken) getGrantType() string { + switch spt.inner.Secret.(type) { + case *ServicePrincipalUsernamePasswordSecret: + return OAuthGrantTypeUserPass + case *ServicePrincipalAuthorizationCodeSecret: + return OAuthGrantTypeAuthorizationCode + default: + return OAuthGrantTypeClientCredentials + } +} + +func isIMDS(u url.URL) bool { + imds, err := url.Parse(msiEndpoint) + if err != nil { + return false + } + return (u.Host == imds.Host && u.Path == imds.Path) || isAppService() +} + +func (spt *ServicePrincipalToken) refreshInternal(ctx context.Context, resource string) error { + req, err := http.NewRequest(http.MethodPost, spt.inner.OauthConfig.TokenEndpoint.String(), nil) + if err != nil { + return fmt.Errorf("adal: Failed to build the refresh request. 
Error = '%v'", err) + } + req.Header.Add("User-Agent", UserAgent()) + // Add header when runtime is on App Service or Functions + if isAppService() { + asMSISecret, _ := os.LookupEnv(asMSISecretEnv) + req.Header.Add("Secret", asMSISecret) + } + req = req.WithContext(ctx) + if !isIMDS(spt.inner.OauthConfig.TokenEndpoint) { + v := url.Values{} + v.Set("client_id", spt.inner.ClientID) + v.Set("resource", resource) + + if spt.inner.Token.RefreshToken != "" { + v.Set("grant_type", OAuthGrantTypeRefreshToken) + v.Set("refresh_token", spt.inner.Token.RefreshToken) + // web apps must specify client_secret when refreshing tokens + // see https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-protocols-oauth-code#refreshing-the-access-tokens + if spt.getGrantType() == OAuthGrantTypeAuthorizationCode { + err := spt.inner.Secret.SetAuthenticationValues(spt, &v) + if err != nil { + return err + } + } + } else { + v.Set("grant_type", spt.getGrantType()) + err := spt.inner.Secret.SetAuthenticationValues(spt, &v) + if err != nil { + return err + } + } + + s := v.Encode() + body := ioutil.NopCloser(strings.NewReader(s)) + req.ContentLength = int64(len(s)) + req.Header.Set(contentType, mimeTypeFormPost) + req.Body = body + } + + if _, ok := spt.inner.Secret.(*ServicePrincipalMSISecret); ok { + req.Method = http.MethodGet + req.Header.Set(metadataHeader, "true") + } + + var resp *http.Response + if isIMDS(spt.inner.OauthConfig.TokenEndpoint) { + resp, err = retryForIMDS(spt.sender, req, spt.MaxMSIRefreshAttempts) + } else { + resp, err = spt.sender.Do(req) + } + if err != nil { + // don't return a TokenRefreshError here; this will allow retry logic to apply + return fmt.Errorf("adal: Failed to execute the refresh request. Error = '%v'", err) + } + + defer resp.Body.Close() + rb, err := ioutil.ReadAll(resp.Body) + + if resp.StatusCode != http.StatusOK { + if err != nil { + return newTokenRefreshError(fmt.Sprintf("adal: Refresh request failed. 
Status Code = '%d'. Failed reading response body: %v", resp.StatusCode, err), resp) + } + return newTokenRefreshError(fmt.Sprintf("adal: Refresh request failed. Status Code = '%d'. Response body: %s", resp.StatusCode, string(rb)), resp) + } + + // for the following error cases don't return a TokenRefreshError. the operation succeeded + // but some transient failure happened during deserialization. by returning a generic error + // the retry logic will kick in (we don't retry on TokenRefreshError). + + if err != nil { + return fmt.Errorf("adal: Failed to read a new service principal token during refresh. Error = '%v'", err) + } + if len(strings.Trim(string(rb), " ")) == 0 { + return fmt.Errorf("adal: Empty service principal token received during refresh") + } + var token Token + err = json.Unmarshal(rb, &token) + if err != nil { + return fmt.Errorf("adal: Failed to unmarshal the service principal token during refresh. Error = '%v' JSON = '%s'", err, string(rb)) + } + + spt.inner.Token = token + + return spt.InvokeRefreshCallbacks(token) +} + +// retry logic specific to retrieving a token from the IMDS endpoint +func retryForIMDS(sender Sender, req *http.Request, maxAttempts int) (resp *http.Response, err error) { + // copied from client.go due to circular dependency + retries := []int{ + http.StatusRequestTimeout, // 408 + http.StatusTooManyRequests, // 429 + http.StatusInternalServerError, // 500 + http.StatusBadGateway, // 502 + http.StatusServiceUnavailable, // 503 + http.StatusGatewayTimeout, // 504 + } + // extra retry status codes specific to IMDS + retries = append(retries, + http.StatusNotFound, + http.StatusGone, + // all remaining 5xx + http.StatusNotImplemented, + http.StatusHTTPVersionNotSupported, + http.StatusVariantAlsoNegotiates, + http.StatusInsufficientStorage, + http.StatusLoopDetected, + http.StatusNotExtended, + http.StatusNetworkAuthenticationRequired) + + // see 
https://docs.microsoft.com/en-us/azure/active-directory/managed-service-identity/how-to-use-vm-token#retry-guidance + + const maxDelay time.Duration = 60 * time.Second + + attempt := 0 + delay := time.Duration(0) + + for attempt < maxAttempts { + resp, err = sender.Do(req) + // we want to retry if err is not nil or the status code is in the list of retry codes + if err == nil && !responseHasStatusCode(resp, retries...) { + return + } + + // perform exponential backoff with a cap. + // must increment attempt before calculating delay. + attempt++ + // the base value of 2 is the "delta backoff" as specified in the guidance doc + delay += (time.Duration(math.Pow(2, float64(attempt))) * time.Second) + if delay > maxDelay { + delay = maxDelay + } + + select { + case <-time.After(delay): + // intentionally left blank + case <-req.Context().Done(): + err = req.Context().Err() + return + } + } + return +} + +func responseHasStatusCode(resp *http.Response, codes ...int) bool { + if resp != nil { + for _, i := range codes { + if i == resp.StatusCode { + return true + } + } + } + return false +} + +// SetAutoRefresh enables or disables automatic refreshing of stale tokens. +func (spt *ServicePrincipalToken) SetAutoRefresh(autoRefresh bool) { + spt.inner.AutoRefresh = autoRefresh +} + +// SetRefreshWithin sets the interval within which if the token will expire, EnsureFresh will +// refresh the token. +func (spt *ServicePrincipalToken) SetRefreshWithin(d time.Duration) { + spt.inner.RefreshWithin = d + return +} + +// SetSender sets the http.Client used when obtaining the Service Principal token. An +// undecorated http.Client is used by default. +func (spt *ServicePrincipalToken) SetSender(s Sender) { spt.sender = s } + +// OAuthToken implements the OAuthTokenProvider interface. It returns the current access token. 
func (spt *ServicePrincipalToken) OAuthToken() string {
	// Take a read lock so a concurrent refresh cannot swap the token mid-read.
	spt.refreshLock.RLock()
	defer spt.refreshLock.RUnlock()
	return spt.inner.Token.OAuthToken()
}

// Token returns a copy of the current token.
func (spt *ServicePrincipalToken) Token() Token {
	spt.refreshLock.RLock()
	defer spt.refreshLock.RUnlock()
	return spt.inner.Token
}

// MultiTenantServicePrincipalToken contains tokens for multi-tenant authorization.
type MultiTenantServicePrincipalToken struct {
	// PrimaryToken is the token for the primary tenant.
	PrimaryToken *ServicePrincipalToken
	// AuxiliaryTokens holds the tokens for the auxiliary tenants.
	AuxiliaryTokens []*ServicePrincipalToken
}

// PrimaryOAuthToken returns the primary authorization token.
func (mt *MultiTenantServicePrincipalToken) PrimaryOAuthToken() string {
	return mt.PrimaryToken.OAuthToken()
}

// AuxiliaryOAuthTokens returns one to three auxiliary authorization tokens.
func (mt *MultiTenantServicePrincipalToken) AuxiliaryOAuthTokens() []string {
	tokens := make([]string, len(mt.AuxiliaryTokens))
	for i := range mt.AuxiliaryTokens {
		tokens[i] = mt.AuxiliaryTokens[i].OAuthToken()
	}
	return tokens
}

// EnsureFreshWithContext will refresh the token if it will expire within the refresh window (as set by
// RefreshWithin) and autoRefresh flag is on. This method is safe for concurrent use.
func (mt *MultiTenantServicePrincipalToken) EnsureFreshWithContext(ctx context.Context) error {
	// The primary token is refreshed first; any failure aborts before the auxiliary tokens are touched.
	if err := mt.PrimaryToken.EnsureFreshWithContext(ctx); err != nil {
		return fmt.Errorf("failed to refresh primary token: %v", err)
	}
	for _, aux := range mt.AuxiliaryTokens {
		if err := aux.EnsureFreshWithContext(ctx); err != nil {
			return fmt.Errorf("failed to refresh auxiliary token: %v", err)
		}
	}
	return nil
}

// RefreshWithContext obtains a fresh token for the Service Principal.
func (mt *MultiTenantServicePrincipalToken) RefreshWithContext(ctx context.Context) error {
	// Unconditional refresh: primary first, then each auxiliary token in order.
	if err := mt.PrimaryToken.RefreshWithContext(ctx); err != nil {
		return fmt.Errorf("failed to refresh primary token: %v", err)
	}
	for _, aux := range mt.AuxiliaryTokens {
		if err := aux.RefreshWithContext(ctx); err != nil {
			return fmt.Errorf("failed to refresh auxiliary token: %v", err)
		}
	}
	return nil
}

// RefreshExchangeWithContext refreshes the token, but for a different resource.
func (mt *MultiTenantServicePrincipalToken) RefreshExchangeWithContext(ctx context.Context, resource string) error {
	if err := mt.PrimaryToken.RefreshExchangeWithContext(ctx, resource); err != nil {
		return fmt.Errorf("failed to refresh primary token: %v", err)
	}
	for _, aux := range mt.AuxiliaryTokens {
		if err := aux.RefreshExchangeWithContext(ctx, resource); err != nil {
			return fmt.Errorf("failed to refresh auxiliary token: %v", err)
		}
	}
	return nil
}

// NewMultiTenantServicePrincipalToken creates a new MultiTenantServicePrincipalToken with the specified credentials and resource.
+func NewMultiTenantServicePrincipalToken(multiTenantCfg MultiTenantOAuthConfig, clientID string, secret string, resource string) (*MultiTenantServicePrincipalToken, error) { + if err := validateStringParam(clientID, "clientID"); err != nil { + return nil, err + } + if err := validateStringParam(secret, "secret"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + auxTenants := multiTenantCfg.AuxiliaryTenants() + m := MultiTenantServicePrincipalToken{ + AuxiliaryTokens: make([]*ServicePrincipalToken, len(auxTenants)), + } + primary, err := NewServicePrincipalToken(*multiTenantCfg.PrimaryTenant(), clientID, secret, resource) + if err != nil { + return nil, fmt.Errorf("failed to create SPT for primary tenant: %v", err) + } + m.PrimaryToken = primary + for i := range auxTenants { + aux, err := NewServicePrincipalToken(*auxTenants[i], clientID, secret, resource) + if err != nil { + return nil, fmt.Errorf("failed to create SPT for auxiliary tenant: %v", err) + } + m.AuxiliaryTokens[i] = aux + } + return &m, nil +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/version.go b/vendor/github.com/Azure/go-autorest/autorest/adal/version.go new file mode 100644 index 00000000000..c867b348439 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/version.go @@ -0,0 +1,45 @@ +package adal + +import ( + "fmt" + "runtime" +) + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +const number = "v1.0.0" + +var ( + ua = fmt.Sprintf("Go/%s (%s-%s) go-autorest/adal/%s", + runtime.Version(), + runtime.GOARCH, + runtime.GOOS, + number, + ) +) + +// UserAgent returns a string containing the Go version, system architecture and OS, and the adal version. +func UserAgent() string { + return ua +} + +// AddToUserAgent adds an extension to the current user agent +func AddToUserAgent(extension string) error { + if extension != "" { + ua = fmt.Sprintf("%s %s", ua, extension) + return nil + } + return fmt.Errorf("Extension was empty, User Agent remained as '%s'", ua) +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/authorization.go b/vendor/github.com/Azure/go-autorest/autorest/authorization.go new file mode 100644 index 00000000000..54e87b5b648 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/authorization.go @@ -0,0 +1,336 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 

import (
	"crypto/tls"
	"encoding/base64"
	"fmt"
	"net/http"
	"net/url"
	"strings"

	"github.com/Azure/go-autorest/autorest/adal"
)

const (
	bearerChallengeHeader       = "Www-Authenticate"
	bearer                      = "Bearer"
	tenantID                    = "tenantID"
	apiKeyAuthorizerHeader      = "Ocp-Apim-Subscription-Key"
	bingAPISdkHeader            = "X-BingApis-SDK-Client"
	golangBingAPISdkHeaderValue = "Go-SDK"
	authorization               = "Authorization"
	basic                       = "Basic"
)

// Authorizer is the interface that provides a PrepareDecorator used to supply request
// authorization. Most often, the Authorizer decorator runs last so it has access to the full
// state of the formed HTTP request.
type Authorizer interface {
	WithAuthorization() PrepareDecorator
}

// NullAuthorizer implements a default, "do nothing" Authorizer.
type NullAuthorizer struct{}

// WithAuthorization returns a PrepareDecorator that does nothing.
func (na NullAuthorizer) WithAuthorization() PrepareDecorator {
	return WithNothing()
}

// APIKeyAuthorizer implements API Key authorization.
type APIKeyAuthorizer struct {
	headers         map[string]interface{} // headers added to each request
	queryParameters map[string]interface{} // query parameters added to each request
}

// NewAPIKeyAuthorizerWithHeaders creates an ApiKeyAuthorizer with headers.
func NewAPIKeyAuthorizerWithHeaders(headers map[string]interface{}) *APIKeyAuthorizer {
	return NewAPIKeyAuthorizer(headers, nil)
}

// NewAPIKeyAuthorizerWithQueryParameters creates an ApiKeyAuthorizer with query parameters.
func NewAPIKeyAuthorizerWithQueryParameters(queryParameters map[string]interface{}) *APIKeyAuthorizer {
	return NewAPIKeyAuthorizer(nil, queryParameters)
}

// NewAPIKeyAuthorizer creates an ApiKeyAuthorizer with the specified headers and query parameters.
func NewAPIKeyAuthorizer(headers map[string]interface{}, queryParameters map[string]interface{}) *APIKeyAuthorizer {
	return &APIKeyAuthorizer{headers: headers, queryParameters: queryParameters}
}

// WithAuthorization returns a PrepareDecorator that adds the configured HTTP headers and query parameters.
func (aka *APIKeyAuthorizer) WithAuthorization() PrepareDecorator {
	return func(p Preparer) Preparer {
		return DecoratePreparer(p, WithHeaders(aka.headers), WithQueryParameters(aka.queryParameters))
	}
}

// CognitiveServicesAuthorizer implements authorization for Cognitive Services.
type CognitiveServicesAuthorizer struct {
	subscriptionKey string
}

// NewCognitiveServicesAuthorizer creates a new CognitiveServicesAuthorizer with the provided subscription key.
func NewCognitiveServicesAuthorizer(subscriptionKey string) *CognitiveServicesAuthorizer {
	return &CognitiveServicesAuthorizer{subscriptionKey: subscriptionKey}
}

// WithAuthorization returns a PrepareDecorator that adds the Cognitive Services
// subscription-key header and the Bing SDK client header to the request.
func (csa *CognitiveServicesAuthorizer) WithAuthorization() PrepareDecorator {
	headers := make(map[string]interface{})
	headers[apiKeyAuthorizerHeader] = csa.subscriptionKey
	headers[bingAPISdkHeader] = golangBingAPISdkHeaderValue

	return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization()
}

// BearerAuthorizer implements the bearer authorization.
type BearerAuthorizer struct {
	tokenProvider adal.OAuthTokenProvider
}

// NewBearerAuthorizer creates a BearerAuthorizer using the given token provider.
func NewBearerAuthorizer(tp adal.OAuthTokenProvider) *BearerAuthorizer {
	return &BearerAuthorizer{tokenProvider: tp}
}

// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose
// value is "Bearer " followed by the token.
//
// By default, the token will be automatically refreshed through the Refresher interface.
func (ba *BearerAuthorizer) WithAuthorization() PrepareDecorator {
	return func(p Preparer) Preparer {
		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
			r, err := p.Prepare(r)
			if err == nil {
				// the ordering is important here, prefer RefresherWithContext if available
				if refresher, ok := ba.tokenProvider.(adal.RefresherWithContext); ok {
					err = refresher.EnsureFreshWithContext(r.Context())
				} else if refresher, ok := ba.tokenProvider.(adal.Refresher); ok {
					err = refresher.EnsureFresh()
				}
				if err != nil {
					// surface the failing HTTP response (when available) alongside the error
					var resp *http.Response
					if tokError, ok := err.(adal.TokenRefreshError); ok {
						resp = tokError.Response()
					}
					return r, NewErrorWithError(err, "azure.BearerAuthorizer", "WithAuthorization", resp,
						"Failed to refresh the Token for request to %s", r.URL)
				}
				return Prepare(r, WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", ba.tokenProvider.OAuthToken())))
			}
			return r, err
		})
	}
}

// BearerAuthorizerCallbackFunc is the authentication callback signature.
type BearerAuthorizerCallbackFunc func(tenantID, resource string) (*BearerAuthorizer, error)

// BearerAuthorizerCallback implements bearer authorization via a callback.
type BearerAuthorizerCallback struct {
	sender   Sender   // used for the probe request that solicits the bearer challenge
	callback BearerAuthorizerCallbackFunc
}

// NewBearerAuthorizerCallback creates a bearer authorization callback. The callback
// is invoked when the HTTP request is submitted.
func NewBearerAuthorizerCallback(s Sender, callback BearerAuthorizerCallbackFunc) *BearerAuthorizerCallback {
	// fall back to a default sender when none is supplied
	if s == nil {
		s = sender(tls.RenegotiateNever)
	}
	return &BearerAuthorizerCallback{sender: s, callback: callback}
}

// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose value
// is "Bearer " followed by the token. The BearerAuthorizer is obtained via a user-supplied callback.
//
// By default, the token will be automatically refreshed through the Refresher interface.
+func (bacb *BearerAuthorizerCallback) WithAuthorization() PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + // make a copy of the request and remove the body as it's not + // required and avoids us having to create a copy of it. + rCopy := *r + removeRequestBody(&rCopy) + + resp, err := bacb.sender.Do(&rCopy) + if err == nil && resp.StatusCode == 401 { + defer resp.Body.Close() + if hasBearerChallenge(resp) { + bc, err := newBearerChallenge(resp) + if err != nil { + return r, err + } + if bacb.callback != nil { + ba, err := bacb.callback(bc.values[tenantID], bc.values["resource"]) + if err != nil { + return r, err + } + return Prepare(r, ba.WithAuthorization()) + } + } + } + } + return r, err + }) + } +} + +// returns true if the HTTP response contains a bearer challenge +func hasBearerChallenge(resp *http.Response) bool { + authHeader := resp.Header.Get(bearerChallengeHeader) + if len(authHeader) == 0 || strings.Index(authHeader, bearer) < 0 { + return false + } + return true +} + +type bearerChallenge struct { + values map[string]string +} + +func newBearerChallenge(resp *http.Response) (bc bearerChallenge, err error) { + challenge := strings.TrimSpace(resp.Header.Get(bearerChallengeHeader)) + trimmedChallenge := challenge[len(bearer)+1:] + + // challenge is a set of key=value pairs that are comma delimited + pairs := strings.Split(trimmedChallenge, ",") + if len(pairs) < 1 { + err = fmt.Errorf("challenge '%s' contains no pairs", challenge) + return bc, err + } + + bc.values = make(map[string]string) + for i := range pairs { + trimmedPair := strings.TrimSpace(pairs[i]) + pair := strings.Split(trimmedPair, "=") + if len(pair) == 2 { + // remove the enclosing quotes + key := strings.Trim(pair[0], "\"") + value := strings.Trim(pair[1], "\"") + + switch key { + case "authorization", "authorization_uri": + // strip the tenant ID from the 
authorization URL + asURL, err := url.Parse(value) + if err != nil { + return bc, err + } + bc.values[tenantID] = asURL.Path[1:] + default: + bc.values[key] = value + } + } + } + + return bc, err +} + +// EventGridKeyAuthorizer implements authorization for event grid using key authentication. +type EventGridKeyAuthorizer struct { + topicKey string +} + +// NewEventGridKeyAuthorizer creates a new EventGridKeyAuthorizer +// with the specified topic key. +func NewEventGridKeyAuthorizer(topicKey string) EventGridKeyAuthorizer { + return EventGridKeyAuthorizer{topicKey: topicKey} +} + +// WithAuthorization returns a PrepareDecorator that adds the aeg-sas-key authentication header. +func (egta EventGridKeyAuthorizer) WithAuthorization() PrepareDecorator { + headers := map[string]interface{}{ + "aeg-sas-key": egta.topicKey, + } + return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization() +} + +// BasicAuthorizer implements basic HTTP authorization by adding the Authorization HTTP header +// with the value "Basic " where is a base64-encoded username:password tuple. +type BasicAuthorizer struct { + userName string + password string +} + +// NewBasicAuthorizer creates a new BasicAuthorizer with the specified username and password. +func NewBasicAuthorizer(userName, password string) *BasicAuthorizer { + return &BasicAuthorizer{ + userName: userName, + password: password, + } +} + +// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose +// value is "Basic " followed by the base64-encoded username:password tuple. +func (ba *BasicAuthorizer) WithAuthorization() PrepareDecorator { + headers := make(map[string]interface{}) + headers[authorization] = basic + " " + base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%s:%s", ba.userName, ba.password))) + + return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization() +} + +// MultiTenantServicePrincipalTokenAuthorizer provides authentication across tenants. 
type MultiTenantServicePrincipalTokenAuthorizer interface {
	WithAuthorization() PrepareDecorator
}

// NewMultiTenantServicePrincipalTokenAuthorizer creates a multi-tenant bearer authorizer using the given token provider.
func NewMultiTenantServicePrincipalTokenAuthorizer(tp adal.MultitenantOAuthTokenProvider) MultiTenantServicePrincipalTokenAuthorizer {
	return &multiTenantSPTAuthorizer{tp: tp}
}

type multiTenantSPTAuthorizer struct {
	tp adal.MultitenantOAuthTokenProvider
}

// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header using the
// primary token along with the auxiliary authorization header using the auxiliary tokens.
//
// By default, the token will be automatically refreshed through the Refresher interface.
func (mt multiTenantSPTAuthorizer) WithAuthorization() PrepareDecorator {
	return func(p Preparer) Preparer {
		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
			r, err := p.Prepare(r)
			if err != nil {
				return r, err
			}
			// refresh primary and auxiliary tokens when the provider supports it
			if refresher, ok := mt.tp.(adal.RefresherWithContext); ok {
				err = refresher.EnsureFreshWithContext(r.Context())
				if err != nil {
					var resp *http.Response
					if tokError, ok := err.(adal.TokenRefreshError); ok {
						resp = tokError.Response()
					}
					return r, NewErrorWithError(err, "azure.multiTenantSPTAuthorizer", "WithAuthorization", resp,
						"Failed to refresh one or more Tokens for request to %s", r.URL)
				}
			}
			// primary token goes in the standard Authorization header
			r, err = Prepare(r, WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", mt.tp.PrimaryOAuthToken())))
			if err != nil {
				return r, err
			}
			// auxiliary tokens are joined with "; " in the auxiliary authorization header
			auxTokens := mt.tp.AuxiliaryOAuthTokens()
			for i := range auxTokens {
				auxTokens[i] = fmt.Sprintf("Bearer %s", auxTokens[i])
			}
			return Prepare(r, WithHeader(headerAuxAuthorization, strings.Join(auxTokens, "; ")))
		})
	}
}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/authorization_sas.go b/vendor/github.com/Azure/go-autorest/autorest/authorization_sas.go
new file mode 100644
index
00000000000..89a659cb664 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/authorization_sas.go @@ -0,0 +1,67 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "fmt" + "net/http" + "strings" +) + +// SASTokenAuthorizer implements an authorization for SAS Token Authentication +// this can be used for interaction with Blob Storage Endpoints +type SASTokenAuthorizer struct { + sasToken string +} + +// NewSASTokenAuthorizer creates a SASTokenAuthorizer using the given credentials +func NewSASTokenAuthorizer(sasToken string) (*SASTokenAuthorizer, error) { + if strings.TrimSpace(sasToken) == "" { + return nil, fmt.Errorf("sasToken cannot be empty") + } + + token := sasToken + if strings.HasPrefix(sasToken, "?") { + token = strings.TrimPrefix(sasToken, "?") + } + + return &SASTokenAuthorizer{ + sasToken: token, + }, nil +} + +// WithAuthorization returns a PrepareDecorator that adds a shared access signature token to the +// URI's query parameters. This can be used for the Blob, Queue, and File Services. 
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/delegate-access-with-shared-access-signature
func (sas *SASTokenAuthorizer) WithAuthorization() PrepareDecorator {
	return func(p Preparer) Preparer {
		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
			r, err := p.Prepare(r)
			if err != nil {
				return r, err
			}

			// append the SAS token to any existing query string
			if r.URL.RawQuery != "" {
				r.URL.RawQuery = fmt.Sprintf("%s&%s", r.URL.RawQuery, sas.sasToken)
			} else {
				r.URL.RawQuery = sas.sasToken
			}

			// keep RequestURI in sync with the rewritten URL
			r.RequestURI = r.URL.String()
			return Prepare(r)
		})
	}
}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/authorization_storage.go b/vendor/github.com/Azure/go-autorest/autorest/authorization_storage.go
new file mode 100644
index 00000000000..33e5f127017
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/authorization_storage.go
@@ -0,0 +1,301 @@
package autorest

// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

import (
	"bytes"
	"crypto/hmac"
	"crypto/sha256"
	"encoding/base64"
	"fmt"
	"net/http"
	"net/url"
	"sort"
	"strings"
	"time"
)

// SharedKeyType defines the enumeration for the various shared key types.
// See https://docs.microsoft.com/en-us/rest/api/storageservices/authorize-with-shared-key for details on the shared key types.
type SharedKeyType string

const (
	// SharedKey is used to authorize against blobs, files and queues services.
	SharedKey SharedKeyType = "sharedKey"

	// SharedKeyForTable is used to authorize against the table service.
	SharedKeyForTable SharedKeyType = "sharedKeyTable"

	// SharedKeyLite is used to authorize against blobs, files and queues services. It's provided for
	// backwards compatibility with API versions before 2009-09-19. Prefer SharedKey instead.
	SharedKeyLite SharedKeyType = "sharedKeyLite"

	// SharedKeyLiteForTable is used to authorize against the table service. It's provided for
	// backwards compatibility with older table API versions. Prefer SharedKeyForTable instead.
	SharedKeyLiteForTable SharedKeyType = "sharedKeyLiteTable"
)

const (
	headerAccept            = "Accept"
	headerAcceptCharset     = "Accept-Charset"
	headerContentEncoding   = "Content-Encoding"
	headerContentLength     = "Content-Length"
	headerContentMD5        = "Content-MD5"
	headerContentLanguage   = "Content-Language"
	headerIfModifiedSince   = "If-Modified-Since"
	headerIfMatch           = "If-Match"
	headerIfNoneMatch       = "If-None-Match"
	headerIfUnmodifiedSince = "If-Unmodified-Since"
	headerDate              = "Date"
	headerXMSDate           = "X-Ms-Date"
	headerXMSVersion        = "x-ms-version"
	headerRange             = "Range"
)

// storageEmulatorAccountName is the well-known account name used by the local storage emulator.
const storageEmulatorAccountName = "devstoreaccount1"

// SharedKeyAuthorizer implements an authorization for Shared Key
// this can be used for interaction with Blob, File and Queue Storage Endpoints
type SharedKeyAuthorizer struct {
	accountName string
	accountKey  []byte // decoded from the base64-encoded account key
	keyType     SharedKeyType
}

// NewSharedKeyAuthorizer creates a SharedKeyAuthorizer using the provided credentials and shared key type.
+func NewSharedKeyAuthorizer(accountName, accountKey string, keyType SharedKeyType) (*SharedKeyAuthorizer, error) { + key, err := base64.StdEncoding.DecodeString(accountKey) + if err != nil { + return nil, fmt.Errorf("malformed storage account key: %v", err) + } + return &SharedKeyAuthorizer{ + accountName: accountName, + accountKey: key, + keyType: keyType, + }, nil +} + +// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose +// value is " " followed by the computed key. +// This can be used for the Blob, Queue, and File Services +// +// from: https://docs.microsoft.com/en-us/rest/api/storageservices/authorize-with-shared-key +// You may use Shared Key authorization to authorize a request made against the +// 2009-09-19 version and later of the Blob and Queue services, +// and version 2014-02-14 and later of the File services. +func (sk *SharedKeyAuthorizer) WithAuthorization() PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err != nil { + return r, err + } + + sk, err := buildSharedKey(sk.accountName, sk.accountKey, r, sk.keyType) + return Prepare(r, WithHeader(headerAuthorization, sk)) + }) + } +} + +func buildSharedKey(accName string, accKey []byte, req *http.Request, keyType SharedKeyType) (string, error) { + canRes, err := buildCanonicalizedResource(accName, req.URL.String(), keyType) + if err != nil { + return "", err + } + + if req.Header == nil { + req.Header = http.Header{} + } + + // ensure date is set + if req.Header.Get(headerDate) == "" && req.Header.Get(headerXMSDate) == "" { + date := time.Now().UTC().Format(http.TimeFormat) + req.Header.Set(headerXMSDate, date) + } + canString, err := buildCanonicalizedString(req.Method, req.Header, canRes, keyType) + if err != nil { + return "", err + } + return createAuthorizationHeader(accName, accKey, canString, keyType), nil +} + +func 
buildCanonicalizedResource(accountName, uri string, keyType SharedKeyType) (string, error) { + errMsg := "buildCanonicalizedResource error: %s" + u, err := url.Parse(uri) + if err != nil { + return "", fmt.Errorf(errMsg, err.Error()) + } + + cr := bytes.NewBufferString("") + if accountName != storageEmulatorAccountName { + cr.WriteString("/") + cr.WriteString(getCanonicalizedAccountName(accountName)) + } + + if len(u.Path) > 0 { + // Any portion of the CanonicalizedResource string that is derived from + // the resource's URI should be encoded exactly as it is in the URI. + // -- https://msdn.microsoft.com/en-gb/library/azure/dd179428.aspx + cr.WriteString(u.EscapedPath()) + } + + params, err := url.ParseQuery(u.RawQuery) + if err != nil { + return "", fmt.Errorf(errMsg, err.Error()) + } + + // See https://github.com/Azure/azure-storage-net/blob/master/Lib/Common/Core/Util/AuthenticationUtility.cs#L277 + if keyType == SharedKey { + if len(params) > 0 { + cr.WriteString("\n") + + keys := []string{} + for key := range params { + keys = append(keys, key) + } + sort.Strings(keys) + + completeParams := []string{} + for _, key := range keys { + if len(params[key]) > 1 { + sort.Strings(params[key]) + } + + completeParams = append(completeParams, fmt.Sprintf("%s:%s", key, strings.Join(params[key], ","))) + } + cr.WriteString(strings.Join(completeParams, "\n")) + } + } else { + // search for "comp" parameter, if exists then add it to canonicalizedresource + if v, ok := params["comp"]; ok { + cr.WriteString("?comp=" + v[0]) + } + } + + return string(cr.Bytes()), nil +} + +func getCanonicalizedAccountName(accountName string) string { + // since we may be trying to access a secondary storage account, we need to + // remove the -secondary part of the storage name + return strings.TrimSuffix(accountName, "-secondary") +} + +func buildCanonicalizedString(verb string, headers http.Header, canonicalizedResource string, keyType SharedKeyType) (string, error) { + contentLength 
:= headers.Get(headerContentLength) + if contentLength == "0" { + contentLength = "" + } + date := headers.Get(headerDate) + if v := headers.Get(headerXMSDate); v != "" { + if keyType == SharedKey || keyType == SharedKeyLite { + date = "" + } else { + date = v + } + } + var canString string + switch keyType { + case SharedKey: + canString = strings.Join([]string{ + verb, + headers.Get(headerContentEncoding), + headers.Get(headerContentLanguage), + contentLength, + headers.Get(headerContentMD5), + headers.Get(headerContentType), + date, + headers.Get(headerIfModifiedSince), + headers.Get(headerIfMatch), + headers.Get(headerIfNoneMatch), + headers.Get(headerIfUnmodifiedSince), + headers.Get(headerRange), + buildCanonicalizedHeader(headers), + canonicalizedResource, + }, "\n") + case SharedKeyForTable: + canString = strings.Join([]string{ + verb, + headers.Get(headerContentMD5), + headers.Get(headerContentType), + date, + canonicalizedResource, + }, "\n") + case SharedKeyLite: + canString = strings.Join([]string{ + verb, + headers.Get(headerContentMD5), + headers.Get(headerContentType), + date, + buildCanonicalizedHeader(headers), + canonicalizedResource, + }, "\n") + case SharedKeyLiteForTable: + canString = strings.Join([]string{ + date, + canonicalizedResource, + }, "\n") + default: + return "", fmt.Errorf("key type '%s' is not supported", keyType) + } + return canString, nil +} + +func buildCanonicalizedHeader(headers http.Header) string { + cm := make(map[string]string) + + for k := range headers { + headerName := strings.TrimSpace(strings.ToLower(k)) + if strings.HasPrefix(headerName, "x-ms-") { + cm[headerName] = headers.Get(k) + } + } + + if len(cm) == 0 { + return "" + } + + keys := []string{} + for key := range cm { + keys = append(keys, key) + } + + sort.Strings(keys) + + ch := bytes.NewBufferString("") + + for _, key := range keys { + ch.WriteString(key) + ch.WriteRune(':') + ch.WriteString(cm[key]) + ch.WriteRune('\n') + } + + return 
strings.TrimSuffix(string(ch.Bytes()), "\n") +} + +func createAuthorizationHeader(accountName string, accountKey []byte, canonicalizedString string, keyType SharedKeyType) string { + h := hmac.New(sha256.New, accountKey) + h.Write([]byte(canonicalizedString)) + signature := base64.StdEncoding.EncodeToString(h.Sum(nil)) + var key string + switch keyType { + case SharedKey, SharedKeyForTable: + key = "SharedKey" + case SharedKeyLite, SharedKeyLiteForTable: + key = "SharedKeyLite" + } + return fmt.Sprintf("%s %s:%s", key, getCanonicalizedAccountName(accountName), signature) +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/autorest.go b/vendor/github.com/Azure/go-autorest/autorest/autorest.go new file mode 100644 index 00000000000..aafdf021fd6 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/autorest.go @@ -0,0 +1,150 @@ +/* +Package autorest implements an HTTP request pipeline suitable for use across multiple go-routines +and provides the shared routines relied on by AutoRest (see https://github.com/Azure/autorest/) +generated Go code. + +The package breaks sending and responding to HTTP requests into three phases: Preparing, Sending, +and Responding. A typical pattern is: + + req, err := Prepare(&http.Request{}, + token.WithAuthorization()) + + resp, err := Send(req, + WithLogging(logger), + DoErrorIfStatusCode(http.StatusInternalServerError), + DoCloseIfError(), + DoRetryForAttempts(5, time.Second)) + + err = Respond(resp, + ByDiscardingBody(), + ByClosing()) + +Each phase relies on decorators to modify and / or manage processing. Decorators may first modify +and then pass the data along, pass the data first and then modify the result, or wrap themselves +around passing the data (such as a logger might do). Decorators run in the order provided. 
For +example, the following: + + req, err := Prepare(&http.Request{}, + WithBaseURL("https://microsoft.com/"), + WithPath("a"), + WithPath("b"), + WithPath("c")) + +will set the URL to: + + https://microsoft.com/a/b/c + +Preparers and Responders may be shared and re-used (assuming the underlying decorators support +sharing and re-use). Performant use is obtained by creating one or more Preparers and Responders +shared among multiple go-routines, and a single Sender shared among multiple sending go-routines, +all bound together by means of input / output channels. + +Decorators hold their passed state within a closure (such as the path components in the example +above). Be careful to share Preparers and Responders only in a context where such held state +applies. For example, it may not make sense to share a Preparer that applies a query string from a +fixed set of values. Similarly, sharing a Responder that reads the response body into a passed +struct (e.g., ByUnmarshallingJson) is likely incorrect. + +Lastly, the Swagger specification (https://swagger.io) that drives AutoRest +(https://github.com/Azure/autorest/) precisely defines two date forms: date and date-time. The +github.com/Azure/go-autorest/autorest/date package provides time.Time derivations to ensure +correct parsing and formatting. + +Errors raised by autorest objects and methods will conform to the autorest.Error interface. + +See the included examples for more detail. For details on the suggested use of this package by +generated clients, see the Client described below. +*/ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "context" + "net/http" + "time" +) + +const ( + // HeaderLocation specifies the HTTP Location header. + HeaderLocation = "Location" + + // HeaderRetryAfter specifies the HTTP Retry-After header. + HeaderRetryAfter = "Retry-After" +) + +// ResponseHasStatusCode returns true if the status code in the HTTP Response is in the passed set +// and false otherwise. +func ResponseHasStatusCode(resp *http.Response, codes ...int) bool { + if resp == nil { + return false + } + return containsInt(codes, resp.StatusCode) +} + +// GetLocation retrieves the URL from the Location header of the passed response. +func GetLocation(resp *http.Response) string { + return resp.Header.Get(HeaderLocation) +} + +// GetRetryAfter extracts the retry delay from the Retry-After header of the passed response. If +// the header is absent or is malformed, it will return the supplied default delay time.Duration. +func GetRetryAfter(resp *http.Response, defaultDelay time.Duration) time.Duration { + retry := resp.Header.Get(HeaderRetryAfter) + if retry == "" { + return defaultDelay + } + + d, err := time.ParseDuration(retry + "s") + if err != nil { + return defaultDelay + } + + return d +} + +// NewPollingRequest allocates and returns a new http.Request to poll for the passed response. 
+func NewPollingRequest(resp *http.Response, cancel <-chan struct{}) (*http.Request, error) { + location := GetLocation(resp) + if location == "" { + return nil, NewErrorWithResponse("autorest", "NewPollingRequest", resp, "Location header missing from response that requires polling") + } + + req, err := Prepare(&http.Request{Cancel: cancel}, + AsGet(), + WithBaseURL(location)) + if err != nil { + return nil, NewErrorWithError(err, "autorest", "NewPollingRequest", nil, "Failure creating poll request to %s", location) + } + + return req, nil +} + +// NewPollingRequestWithContext allocates and returns a new http.Request with the specified context to poll for the passed response. +func NewPollingRequestWithContext(ctx context.Context, resp *http.Response) (*http.Request, error) { + location := GetLocation(resp) + if location == "" { + return nil, NewErrorWithResponse("autorest", "NewPollingRequestWithContext", resp, "Location header missing from response that requires polling") + } + + req, err := Prepare((&http.Request{}).WithContext(ctx), + AsGet(), + WithBaseURL(location)) + if err != nil { + return nil, NewErrorWithError(err, "autorest", "NewPollingRequestWithContext", nil, "Failure creating poll request to %s", location) + } + + return req, nil +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/async.go b/vendor/github.com/Azure/go-autorest/autorest/azure/async.go new file mode 100644 index 00000000000..1cb41cbeb1b --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/async.go @@ -0,0 +1,924 @@ +package azure + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "strings" + "time" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/tracing" +) + +const ( + headerAsyncOperation = "Azure-AsyncOperation" +) + +const ( + operationInProgress string = "InProgress" + operationCanceled string = "Canceled" + operationFailed string = "Failed" + operationSucceeded string = "Succeeded" +) + +var pollingCodes = [...]int{http.StatusNoContent, http.StatusAccepted, http.StatusCreated, http.StatusOK} + +// Future provides a mechanism to access the status and results of an asynchronous request. +// Since futures are stateful they should be passed by value to avoid race conditions. +type Future struct { + pt pollingTracker +} + +// NewFutureFromResponse returns a new Future object initialized +// with the initial response from an asynchronous operation. +func NewFutureFromResponse(resp *http.Response) (Future, error) { + pt, err := createPollingTracker(resp) + return Future{pt: pt}, err +} + +// Response returns the last HTTP response. +func (f Future) Response() *http.Response { + if f.pt == nil { + return nil + } + return f.pt.latestResponse() +} + +// Status returns the last status message of the operation. +func (f Future) Status() string { + if f.pt == nil { + return "" + } + return f.pt.pollingStatus() +} + +// PollingMethod returns the method used to monitor the status of the asynchronous operation. 
+func (f Future) PollingMethod() PollingMethodType { + if f.pt == nil { + return PollingUnknown + } + return f.pt.pollingMethod() +} + +// DoneWithContext queries the service to see if the operation has completed. +func (f *Future) DoneWithContext(ctx context.Context, sender autorest.Sender) (done bool, err error) { + ctx = tracing.StartSpan(ctx, "github.com/Azure/go-autorest/autorest/azure/async.DoneWithContext") + defer func() { + sc := -1 + resp := f.Response() + if resp != nil { + sc = resp.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + + if f.pt == nil { + return false, autorest.NewError("Future", "Done", "future is not initialized") + } + if f.pt.hasTerminated() { + return true, f.pt.pollingError() + } + if err := f.pt.pollForStatus(ctx, sender); err != nil { + return false, err + } + if err := f.pt.checkForErrors(); err != nil { + return f.pt.hasTerminated(), err + } + if err := f.pt.updatePollingState(f.pt.provisioningStateApplicable()); err != nil { + return false, err + } + if err := f.pt.initPollingMethod(); err != nil { + return false, err + } + if err := f.pt.updatePollingMethod(); err != nil { + return false, err + } + return f.pt.hasTerminated(), f.pt.pollingError() +} + +// GetPollingDelay returns a duration the application should wait before checking +// the status of the asynchronous request and true; this value is returned from +// the service via the Retry-After response header. If the header wasn't returned +// then the function returns the zero-value time.Duration and false. 
+func (f Future) GetPollingDelay() (time.Duration, bool) { + if f.pt == nil { + return 0, false + } + resp := f.pt.latestResponse() + if resp == nil { + return 0, false + } + + retry := resp.Header.Get(autorest.HeaderRetryAfter) + if retry == "" { + return 0, false + } + + d, err := time.ParseDuration(retry + "s") + if err != nil { + panic(err) + } + + return d, true +} + +// WaitForCompletionRef will return when one of the following conditions is met: the long +// running operation has completed, the provided context is cancelled, or the client's +// polling duration has been exceeded. It will retry failed polling attempts based on +// the retry value defined in the client up to the maximum retry attempts. +// If no deadline is specified in the context then the client.PollingDuration will be +// used to determine if a default deadline should be used. +// If PollingDuration is greater than zero the value will be used as the context's timeout. +// If PollingDuration is zero then no default deadline will be used. 
+func (f *Future) WaitForCompletionRef(ctx context.Context, client autorest.Client) (err error) { + ctx = tracing.StartSpan(ctx, "github.com/Azure/go-autorest/autorest/azure/async.WaitForCompletionRef") + defer func() { + sc := -1 + resp := f.Response() + if resp != nil { + sc = resp.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + cancelCtx := ctx + // if the provided context already has a deadline don't override it + _, hasDeadline := ctx.Deadline() + if d := client.PollingDuration; !hasDeadline && d != 0 { + var cancel context.CancelFunc + cancelCtx, cancel = context.WithTimeout(ctx, d) + defer cancel() + } + + done, err := f.DoneWithContext(ctx, client) + for attempts := 0; !done; done, err = f.DoneWithContext(ctx, client) { + if attempts >= client.RetryAttempts { + return autorest.NewErrorWithError(err, "Future", "WaitForCompletion", f.pt.latestResponse(), "the number of retries has been exceeded") + } + // we want delayAttempt to be zero in the non-error case so + // that DelayForBackoff doesn't perform exponential back-off + var delayAttempt int + var delay time.Duration + if err == nil { + // check for Retry-After delay, if not present use the client's polling delay + var ok bool + delay, ok = f.GetPollingDelay() + if !ok { + delay = client.PollingDelay + } + } else { + // there was an error polling for status so perform exponential + // back-off based on the number of attempts using the client's retry + // duration. update attempts after delayAttempt to avoid off-by-one. + delayAttempt = attempts + delay = client.RetryDuration + attempts++ + } + // wait until the delay elapses or the context is cancelled + delayElapsed := autorest.DelayForBackoff(delay, delayAttempt, cancelCtx.Done()) + if !delayElapsed { + return autorest.NewErrorWithError(cancelCtx.Err(), "Future", "WaitForCompletion", f.pt.latestResponse(), "context has been cancelled") + } + } + return +} + +// MarshalJSON implements the json.Marshaler interface. 
+func (f Future) MarshalJSON() ([]byte, error) { + return json.Marshal(f.pt) +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (f *Future) UnmarshalJSON(data []byte) error { + // unmarshal into JSON object to determine the tracker type + obj := map[string]interface{}{} + err := json.Unmarshal(data, &obj) + if err != nil { + return err + } + if obj["method"] == nil { + return autorest.NewError("Future", "UnmarshalJSON", "missing 'method' property") + } + method := obj["method"].(string) + switch strings.ToUpper(method) { + case http.MethodDelete: + f.pt = &pollingTrackerDelete{} + case http.MethodPatch: + f.pt = &pollingTrackerPatch{} + case http.MethodPost: + f.pt = &pollingTrackerPost{} + case http.MethodPut: + f.pt = &pollingTrackerPut{} + default: + return autorest.NewError("Future", "UnmarshalJSON", "unsupoorted method '%s'", method) + } + // now unmarshal into the tracker + return json.Unmarshal(data, &f.pt) +} + +// PollingURL returns the URL used for retrieving the status of the long-running operation. +func (f Future) PollingURL() string { + if f.pt == nil { + return "" + } + return f.pt.pollingURL() +} + +// GetResult should be called once polling has completed successfully. +// It makes the final GET call to retrieve the resultant payload. +func (f Future) GetResult(sender autorest.Sender) (*http.Response, error) { + if f.pt.finalGetURL() == "" { + // we can end up in this situation if the async operation returns a 200 + // with no polling URLs. in that case return the response which should + // contain the JSON payload (only do this for successful terminal cases). 
+ if lr := f.pt.latestResponse(); lr != nil && f.pt.hasSucceeded() { + return lr, nil + } + return nil, autorest.NewError("Future", "GetResult", "missing URL for retrieving result") + } + req, err := http.NewRequest(http.MethodGet, f.pt.finalGetURL(), nil) + if err != nil { + return nil, err + } + return sender.Do(req) +} + +type pollingTracker interface { + // these methods can differ per tracker + + // checks the response headers and status code to determine the polling mechanism + updatePollingMethod() error + + // checks the response for tracker-specific error conditions + checkForErrors() error + + // returns true if provisioning state should be checked + provisioningStateApplicable() bool + + // methods common to all trackers + + // initializes a tracker's polling URL and method, called for each iteration. + // these values can be overridden by each polling tracker as required. + initPollingMethod() error + + // initializes the tracker's internal state, call this when the tracker is created + initializeState() error + + // makes an HTTP request to check the status of the LRO + pollForStatus(ctx context.Context, sender autorest.Sender) error + + // updates internal tracker state, call this after each call to pollForStatus + updatePollingState(provStateApl bool) error + + // returns the error response from the service, can be nil + pollingError() error + + // returns the polling method being used + pollingMethod() PollingMethodType + + // returns the state of the LRO as returned from the service + pollingStatus() string + + // returns the URL used for polling status + pollingURL() string + + // returns the URL used for the final GET to retrieve the resource + finalGetURL() string + + // returns true if the LRO is in a terminal state + hasTerminated() bool + + // returns true if the LRO is in a failed terminal state + hasFailed() bool + + // returns true if the LRO is in a successful terminal state + hasSucceeded() bool + + // returns the cached HTTP response 
after a call to pollForStatus(), can be nil + latestResponse() *http.Response +} + +type pollingTrackerBase struct { + // resp is the last response, either from the submission of the LRO or from polling + resp *http.Response + + // method is the HTTP verb, this is needed for deserialization + Method string `json:"method"` + + // rawBody is the raw JSON response body + rawBody map[string]interface{} + + // denotes if polling is using async-operation or location header + Pm PollingMethodType `json:"pollingMethod"` + + // the URL to poll for status + URI string `json:"pollingURI"` + + // the state of the LRO as returned from the service + State string `json:"lroState"` + + // the URL to GET for the final result + FinalGetURI string `json:"resultURI"` + + // used to hold an error object returned from the service + Err *ServiceError `json:"error,omitempty"` +} + +func (pt *pollingTrackerBase) initializeState() error { + // determine the initial polling state based on response body and/or HTTP status + // code. this is applicable to the initial LRO response, not polling responses! 
+ pt.Method = pt.resp.Request.Method + if err := pt.updateRawBody(); err != nil { + return err + } + switch pt.resp.StatusCode { + case http.StatusOK: + if ps := pt.getProvisioningState(); ps != nil { + pt.State = *ps + if pt.hasFailed() { + pt.updateErrorFromResponse() + return pt.pollingError() + } + } else { + pt.State = operationSucceeded + } + case http.StatusCreated: + if ps := pt.getProvisioningState(); ps != nil { + pt.State = *ps + } else { + pt.State = operationInProgress + } + case http.StatusAccepted: + pt.State = operationInProgress + case http.StatusNoContent: + pt.State = operationSucceeded + default: + pt.State = operationFailed + pt.updateErrorFromResponse() + return pt.pollingError() + } + return pt.initPollingMethod() +} + +func (pt pollingTrackerBase) getProvisioningState() *string { + if pt.rawBody != nil && pt.rawBody["properties"] != nil { + p := pt.rawBody["properties"].(map[string]interface{}) + if ps := p["provisioningState"]; ps != nil { + s := ps.(string) + return &s + } + } + return nil +} + +func (pt *pollingTrackerBase) updateRawBody() error { + pt.rawBody = map[string]interface{}{} + if pt.resp.ContentLength != 0 { + defer pt.resp.Body.Close() + b, err := ioutil.ReadAll(pt.resp.Body) + if err != nil { + return autorest.NewErrorWithError(err, "pollingTrackerBase", "updateRawBody", nil, "failed to read response body") + } + // observed in 204 responses over HTTP/2.0; the content length is -1 but body is empty + if len(b) == 0 { + return nil + } + // put the body back so it's available to other callers + pt.resp.Body = ioutil.NopCloser(bytes.NewReader(b)) + if err = json.Unmarshal(b, &pt.rawBody); err != nil { + return autorest.NewErrorWithError(err, "pollingTrackerBase", "updateRawBody", nil, "failed to unmarshal response body") + } + } + return nil +} + +func (pt *pollingTrackerBase) pollForStatus(ctx context.Context, sender autorest.Sender) error { + req, err := http.NewRequest(http.MethodGet, pt.URI, nil) + if err != nil { + return 
autorest.NewErrorWithError(err, "pollingTrackerBase", "pollForStatus", nil, "failed to create HTTP request") + } + + req = req.WithContext(ctx) + preparer := autorest.CreatePreparer(autorest.GetPrepareDecorators(ctx)...) + req, err = preparer.Prepare(req) + if err != nil { + return autorest.NewErrorWithError(err, "pollingTrackerBase", "pollForStatus", nil, "failed preparing HTTP request") + } + pt.resp, err = sender.Do(req) + if err != nil { + return autorest.NewErrorWithError(err, "pollingTrackerBase", "pollForStatus", nil, "failed to send HTTP request") + } + if autorest.ResponseHasStatusCode(pt.resp, pollingCodes[:]...) { + // reset the service error on success case + pt.Err = nil + err = pt.updateRawBody() + } else { + // check response body for error content + pt.updateErrorFromResponse() + err = pt.pollingError() + } + return err +} + +// attempts to unmarshal a ServiceError type from the response body. +// if that fails then make a best attempt at creating something meaningful. +// NOTE: this assumes that the async operation has failed. +func (pt *pollingTrackerBase) updateErrorFromResponse() { + var err error + if pt.resp.ContentLength != 0 { + type respErr struct { + ServiceError *ServiceError `json:"error"` + } + re := respErr{} + defer pt.resp.Body.Close() + var b []byte + if b, err = ioutil.ReadAll(pt.resp.Body); err != nil || len(b) == 0 { + goto Default + } + if err = json.Unmarshal(b, &re); err != nil { + goto Default + } + // unmarshalling the error didn't yield anything, try unwrapped error + if re.ServiceError == nil { + err = json.Unmarshal(b, &re.ServiceError) + if err != nil { + goto Default + } + } + // the unmarshaller will ensure re.ServiceError is non-nil + // even if there was no content unmarshalled so check the code. 
+ if re.ServiceError.Code != "" { + pt.Err = re.ServiceError + return + } + } +Default: + se := &ServiceError{ + Code: pt.pollingStatus(), + Message: "The async operation failed.", + } + if err != nil { + se.InnerError = make(map[string]interface{}) + se.InnerError["unmarshalError"] = err.Error() + } + // stick the response body into the error object in hopes + // it contains something useful to help diagnose the failure. + if len(pt.rawBody) > 0 { + se.AdditionalInfo = []map[string]interface{}{ + pt.rawBody, + } + } + pt.Err = se +} + +func (pt *pollingTrackerBase) updatePollingState(provStateApl bool) error { + if pt.Pm == PollingAsyncOperation && pt.rawBody["status"] != nil { + pt.State = pt.rawBody["status"].(string) + } else { + if pt.resp.StatusCode == http.StatusAccepted { + pt.State = operationInProgress + } else if provStateApl { + if ps := pt.getProvisioningState(); ps != nil { + pt.State = *ps + } else { + pt.State = operationSucceeded + } + } else { + return autorest.NewError("pollingTrackerBase", "updatePollingState", "the response from the async operation has an invalid status code") + } + } + // if the operation has failed update the error state + if pt.hasFailed() { + pt.updateErrorFromResponse() + } + return nil +} + +func (pt pollingTrackerBase) pollingError() error { + if pt.Err == nil { + return nil + } + return pt.Err +} + +func (pt pollingTrackerBase) pollingMethod() PollingMethodType { + return pt.Pm +} + +func (pt pollingTrackerBase) pollingStatus() string { + return pt.State +} + +func (pt pollingTrackerBase) pollingURL() string { + return pt.URI +} + +func (pt pollingTrackerBase) finalGetURL() string { + return pt.FinalGetURI +} + +func (pt pollingTrackerBase) hasTerminated() bool { + return strings.EqualFold(pt.State, operationCanceled) || strings.EqualFold(pt.State, operationFailed) || strings.EqualFold(pt.State, operationSucceeded) +} + +func (pt pollingTrackerBase) hasFailed() bool { + return strings.EqualFold(pt.State, 
operationCanceled) || strings.EqualFold(pt.State, operationFailed) +} + +func (pt pollingTrackerBase) hasSucceeded() bool { + return strings.EqualFold(pt.State, operationSucceeded) +} + +func (pt pollingTrackerBase) latestResponse() *http.Response { + return pt.resp +} + +// error checking common to all trackers +func (pt pollingTrackerBase) baseCheckForErrors() error { + // for Azure-AsyncOperations the response body cannot be nil or empty + if pt.Pm == PollingAsyncOperation { + if pt.resp.Body == nil || pt.resp.ContentLength == 0 { + return autorest.NewError("pollingTrackerBase", "baseCheckForErrors", "for Azure-AsyncOperation response body cannot be nil") + } + if pt.rawBody["status"] == nil { + return autorest.NewError("pollingTrackerBase", "baseCheckForErrors", "missing status property in Azure-AsyncOperation response body") + } + } + return nil +} + +// default initialization of polling URL/method. each verb tracker will update this as required. +func (pt *pollingTrackerBase) initPollingMethod() error { + if ao, err := getURLFromAsyncOpHeader(pt.resp); err != nil { + return err + } else if ao != "" { + pt.URI = ao + pt.Pm = PollingAsyncOperation + return nil + } + if lh, err := getURLFromLocationHeader(pt.resp); err != nil { + return err + } else if lh != "" { + pt.URI = lh + pt.Pm = PollingLocation + return nil + } + // it's ok if we didn't find a polling header, this will be handled elsewhere + return nil +} + +// DELETE + +type pollingTrackerDelete struct { + pollingTrackerBase +} + +func (pt *pollingTrackerDelete) updatePollingMethod() error { + // for 201 the Location header is required + if pt.resp.StatusCode == http.StatusCreated { + if lh, err := getURLFromLocationHeader(pt.resp); err != nil { + return err + } else if lh == "" { + return autorest.NewError("pollingTrackerDelete", "updateHeaders", "missing Location header in 201 response") + } else { + pt.URI = lh + } + pt.Pm = PollingLocation + pt.FinalGetURI = pt.URI + } + // for 202 prefer the 
Azure-AsyncOperation header but fall back to Location if necessary + if pt.resp.StatusCode == http.StatusAccepted { + ao, err := getURLFromAsyncOpHeader(pt.resp) + if err != nil { + return err + } else if ao != "" { + pt.URI = ao + pt.Pm = PollingAsyncOperation + } + // if the Location header is invalid and we already have a polling URL + // then we don't care if the Location header URL is malformed. + if lh, err := getURLFromLocationHeader(pt.resp); err != nil && pt.URI == "" { + return err + } else if lh != "" { + if ao == "" { + pt.URI = lh + pt.Pm = PollingLocation + } + // when both headers are returned we use the value in the Location header for the final GET + pt.FinalGetURI = lh + } + // make sure a polling URL was found + if pt.URI == "" { + return autorest.NewError("pollingTrackerPost", "updateHeaders", "didn't get any suitable polling URLs in 202 response") + } + } + return nil +} + +func (pt pollingTrackerDelete) checkForErrors() error { + return pt.baseCheckForErrors() +} + +func (pt pollingTrackerDelete) provisioningStateApplicable() bool { + return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusNoContent +} + +// PATCH + +type pollingTrackerPatch struct { + pollingTrackerBase +} + +func (pt *pollingTrackerPatch) updatePollingMethod() error { + // by default we can use the original URL for polling and final GET + if pt.URI == "" { + pt.URI = pt.resp.Request.URL.String() + } + if pt.FinalGetURI == "" { + pt.FinalGetURI = pt.resp.Request.URL.String() + } + if pt.Pm == PollingUnknown { + pt.Pm = PollingRequestURI + } + // for 201 it's permissible for no headers to be returned + if pt.resp.StatusCode == http.StatusCreated { + if ao, err := getURLFromAsyncOpHeader(pt.resp); err != nil { + return err + } else if ao != "" { + pt.URI = ao + pt.Pm = PollingAsyncOperation + } + } + // for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary + // note the absence of the "final GET" mechanism for PATCH + if 
pt.resp.StatusCode == http.StatusAccepted { + ao, err := getURLFromAsyncOpHeader(pt.resp) + if err != nil { + return err + } else if ao != "" { + pt.URI = ao + pt.Pm = PollingAsyncOperation + } + if ao == "" { + if lh, err := getURLFromLocationHeader(pt.resp); err != nil { + return err + } else if lh == "" { + return autorest.NewError("pollingTrackerPatch", "updateHeaders", "didn't get any suitable polling URLs in 202 response") + } else { + pt.URI = lh + pt.Pm = PollingLocation + } + } + } + return nil +} + +func (pt pollingTrackerPatch) checkForErrors() error { + return pt.baseCheckForErrors() +} + +func (pt pollingTrackerPatch) provisioningStateApplicable() bool { + return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusCreated +} + +// POST + +type pollingTrackerPost struct { + pollingTrackerBase +} + +func (pt *pollingTrackerPost) updatePollingMethod() error { + // 201 requires Location header + if pt.resp.StatusCode == http.StatusCreated { + if lh, err := getURLFromLocationHeader(pt.resp); err != nil { + return err + } else if lh == "" { + return autorest.NewError("pollingTrackerPost", "updateHeaders", "missing Location header in 201 response") + } else { + pt.URI = lh + pt.FinalGetURI = lh + pt.Pm = PollingLocation + } + } + // for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary + if pt.resp.StatusCode == http.StatusAccepted { + ao, err := getURLFromAsyncOpHeader(pt.resp) + if err != nil { + return err + } else if ao != "" { + pt.URI = ao + pt.Pm = PollingAsyncOperation + } + // if the Location header is invalid and we already have a polling URL + // then we don't care if the Location header URL is malformed. 
+ if lh, err := getURLFromLocationHeader(pt.resp); err != nil && pt.URI == "" { + return err + } else if lh != "" { + if ao == "" { + pt.URI = lh + pt.Pm = PollingLocation + } + // when both headers are returned we use the value in the Location header for the final GET + pt.FinalGetURI = lh + } + // make sure a polling URL was found + if pt.URI == "" { + return autorest.NewError("pollingTrackerPost", "updateHeaders", "didn't get any suitable polling URLs in 202 response") + } + } + return nil +} + +func (pt pollingTrackerPost) checkForErrors() error { + return pt.baseCheckForErrors() +} + +func (pt pollingTrackerPost) provisioningStateApplicable() bool { + return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusNoContent +} + +// PUT + +type pollingTrackerPut struct { + pollingTrackerBase +} + +func (pt *pollingTrackerPut) updatePollingMethod() error { + // by default we can use the original URL for polling and final GET + if pt.URI == "" { + pt.URI = pt.resp.Request.URL.String() + } + if pt.FinalGetURI == "" { + pt.FinalGetURI = pt.resp.Request.URL.String() + } + if pt.Pm == PollingUnknown { + pt.Pm = PollingRequestURI + } + // for 201 it's permissible for no headers to be returned + if pt.resp.StatusCode == http.StatusCreated { + if ao, err := getURLFromAsyncOpHeader(pt.resp); err != nil { + return err + } else if ao != "" { + pt.URI = ao + pt.Pm = PollingAsyncOperation + } + } + // for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary + if pt.resp.StatusCode == http.StatusAccepted { + ao, err := getURLFromAsyncOpHeader(pt.resp) + if err != nil { + return err + } else if ao != "" { + pt.URI = ao + pt.Pm = PollingAsyncOperation + } + // if the Location header is invalid and we already have a polling URL + // then we don't care if the Location header URL is malformed. 
+ if lh, err := getURLFromLocationHeader(pt.resp); err != nil && pt.URI == "" { + return err + } else if lh != "" { + if ao == "" { + pt.URI = lh + pt.Pm = PollingLocation + } + } + // make sure a polling URL was found + if pt.URI == "" { + return autorest.NewError("pollingTrackerPut", "updateHeaders", "didn't get any suitable polling URLs in 202 response") + } + } + return nil +} + +func (pt pollingTrackerPut) checkForErrors() error { + err := pt.baseCheckForErrors() + if err != nil { + return err + } + // if there are no LRO headers then the body cannot be empty + ao, err := getURLFromAsyncOpHeader(pt.resp) + if err != nil { + return err + } + lh, err := getURLFromLocationHeader(pt.resp) + if err != nil { + return err + } + if ao == "" && lh == "" && len(pt.rawBody) == 0 { + return autorest.NewError("pollingTrackerPut", "checkForErrors", "the response did not contain a body") + } + return nil +} + +func (pt pollingTrackerPut) provisioningStateApplicable() bool { + return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusCreated +} + +// creates a polling tracker based on the verb of the original request +func createPollingTracker(resp *http.Response) (pollingTracker, error) { + var pt pollingTracker + switch strings.ToUpper(resp.Request.Method) { + case http.MethodDelete: + pt = &pollingTrackerDelete{pollingTrackerBase: pollingTrackerBase{resp: resp}} + case http.MethodPatch: + pt = &pollingTrackerPatch{pollingTrackerBase: pollingTrackerBase{resp: resp}} + case http.MethodPost: + pt = &pollingTrackerPost{pollingTrackerBase: pollingTrackerBase{resp: resp}} + case http.MethodPut: + pt = &pollingTrackerPut{pollingTrackerBase: pollingTrackerBase{resp: resp}} + default: + return nil, autorest.NewError("azure", "createPollingTracker", "unsupported HTTP method %s", resp.Request.Method) + } + if err := pt.initializeState(); err != nil { + return pt, err + } + // this initializes the polling header values, we do this during creation in case the + // 
initial response send us invalid values; this way the API call will return a non-nil + // error (not doing this means the error shows up in Future.Done) + return pt, pt.updatePollingMethod() +} + +// gets the polling URL from the Azure-AsyncOperation header. +// ensures the URL is well-formed and absolute. +func getURLFromAsyncOpHeader(resp *http.Response) (string, error) { + s := resp.Header.Get(http.CanonicalHeaderKey(headerAsyncOperation)) + if s == "" { + return "", nil + } + if !isValidURL(s) { + return "", autorest.NewError("azure", "getURLFromAsyncOpHeader", "invalid polling URL '%s'", s) + } + return s, nil +} + +// gets the polling URL from the Location header. +// ensures the URL is well-formed and absolute. +func getURLFromLocationHeader(resp *http.Response) (string, error) { + s := resp.Header.Get(http.CanonicalHeaderKey(autorest.HeaderLocation)) + if s == "" { + return "", nil + } + if !isValidURL(s) { + return "", autorest.NewError("azure", "getURLFromLocationHeader", "invalid polling URL '%s'", s) + } + return s, nil +} + +// verify that the URL is valid and absolute +func isValidURL(s string) bool { + u, err := url.Parse(s) + return err == nil && u.IsAbs() +} + +// PollingMethodType defines a type used for enumerating polling mechanisms. +type PollingMethodType string + +const ( + // PollingAsyncOperation indicates the polling method uses the Azure-AsyncOperation header. + PollingAsyncOperation PollingMethodType = "AsyncOperation" + + // PollingLocation indicates the polling method uses the Location header. + PollingLocation PollingMethodType = "Location" + + // PollingRequestURI indicates the polling method uses the original request URI. + PollingRequestURI PollingMethodType = "RequestURI" + + // PollingUnknown indicates an unknown polling method and is the default value. + PollingUnknown PollingMethodType = "" +) + +// AsyncOpIncompleteError is the type that's returned from a future that has not completed. 
+type AsyncOpIncompleteError struct { + // FutureType is the name of the type composed of a azure.Future. + FutureType string +} + +// Error returns an error message including the originating type name of the error. +func (e AsyncOpIncompleteError) Error() string { + return fmt.Sprintf("%s: asynchronous operation has not completed", e.FutureType) +} + +// NewAsyncOpIncompleteError creates a new AsyncOpIncompleteError with the specified parameters. +func NewAsyncOpIncompleteError(futureType string) AsyncOpIncompleteError { + return AsyncOpIncompleteError{ + FutureType: futureType, + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/auth/LICENSE b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/LICENSE new file mode 100644 index 00000000000..b9d6a27ea92 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/auth/auth.go b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/auth.go new file mode 100644 index 00000000000..5f02026b391 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/auth.go @@ -0,0 +1,737 @@ +package auth + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "crypto/rsa" + "crypto/x509" + "encoding/binary" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "log" + "os" + "strings" + "unicode/utf16" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/adal" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/azure/cli" + "github.com/dimchansky/utfbom" + "golang.org/x/crypto/pkcs12" +) + +// The possible keys in the Values map. 
+const ( + SubscriptionID = "AZURE_SUBSCRIPTION_ID" + TenantID = "AZURE_TENANT_ID" + AuxiliaryTenantIDs = "AZURE_AUXILIARY_TENANT_IDS" + ClientID = "AZURE_CLIENT_ID" + ClientSecret = "AZURE_CLIENT_SECRET" + CertificatePath = "AZURE_CERTIFICATE_PATH" + CertificatePassword = "AZURE_CERTIFICATE_PASSWORD" + Username = "AZURE_USERNAME" + Password = "AZURE_PASSWORD" + EnvironmentName = "AZURE_ENVIRONMENT" + Resource = "AZURE_AD_RESOURCE" + ActiveDirectoryEndpoint = "ActiveDirectoryEndpoint" + ResourceManagerEndpoint = "ResourceManagerEndpoint" + GraphResourceID = "GraphResourceID" + SQLManagementEndpoint = "SQLManagementEndpoint" + GalleryEndpoint = "GalleryEndpoint" + ManagementEndpoint = "ManagementEndpoint" +) + +// NewAuthorizerFromEnvironment creates an Authorizer configured from environment variables in the order: +// 1. Client credentials +// 2. Client certificate +// 3. Username password +// 4. MSI +func NewAuthorizerFromEnvironment() (autorest.Authorizer, error) { + settings, err := GetSettingsFromEnvironment() + if err != nil { + return nil, err + } + return settings.GetAuthorizer() +} + +// NewAuthorizerFromEnvironmentWithResource creates an Authorizer configured from environment variables in the order: +// 1. Client credentials +// 2. Client certificate +// 3. Username password +// 4. MSI +func NewAuthorizerFromEnvironmentWithResource(resource string) (autorest.Authorizer, error) { + settings, err := GetSettingsFromEnvironment() + if err != nil { + return nil, err + } + settings.Values[Resource] = resource + return settings.GetAuthorizer() +} + +// EnvironmentSettings contains the available authentication settings. +type EnvironmentSettings struct { + Values map[string]string + Environment azure.Environment +} + +// GetSettingsFromEnvironment returns the available authentication settings from the environment. 
+func GetSettingsFromEnvironment() (s EnvironmentSettings, err error) { + s = EnvironmentSettings{ + Values: map[string]string{}, + } + s.setValue(SubscriptionID) + s.setValue(TenantID) + s.setValue(AuxiliaryTenantIDs) + s.setValue(ClientID) + s.setValue(ClientSecret) + s.setValue(CertificatePath) + s.setValue(CertificatePassword) + s.setValue(Username) + s.setValue(Password) + s.setValue(EnvironmentName) + s.setValue(Resource) + if v := s.Values[EnvironmentName]; v == "" { + s.Environment = azure.PublicCloud + } else { + s.Environment, err = azure.EnvironmentFromName(v) + } + if s.Values[Resource] == "" { + s.Values[Resource] = s.Environment.ResourceManagerEndpoint + } + return +} + +// GetSubscriptionID returns the available subscription ID or an empty string. +func (settings EnvironmentSettings) GetSubscriptionID() string { + return settings.Values[SubscriptionID] +} + +// adds the specified environment variable value to the Values map if it exists +func (settings EnvironmentSettings) setValue(key string) { + if v := os.Getenv(key); v != "" { + settings.Values[key] = v + } +} + +// helper to return client and tenant IDs +func (settings EnvironmentSettings) getClientAndTenant() (string, string) { + clientID := settings.Values[ClientID] + tenantID := settings.Values[TenantID] + return clientID, tenantID +} + +// GetClientCredentials creates a config object from the available client credentials. +// An error is returned if no client credentials are available. 
+func (settings EnvironmentSettings) GetClientCredentials() (ClientCredentialsConfig, error) { + secret := settings.Values[ClientSecret] + if secret == "" { + return ClientCredentialsConfig{}, errors.New("missing client secret") + } + clientID, tenantID := settings.getClientAndTenant() + config := NewClientCredentialsConfig(clientID, secret, tenantID) + config.AADEndpoint = settings.Environment.ActiveDirectoryEndpoint + config.Resource = settings.Values[Resource] + if auxTenants, ok := settings.Values[AuxiliaryTenantIDs]; ok { + config.AuxTenants = strings.Split(auxTenants, ";") + for i := range config.AuxTenants { + config.AuxTenants[i] = strings.TrimSpace(config.AuxTenants[i]) + } + } + return config, nil +} + +// GetClientCertificate creates a config object from the available certificate credentials. +// An error is returned if no certificate credentials are available. +func (settings EnvironmentSettings) GetClientCertificate() (ClientCertificateConfig, error) { + certPath := settings.Values[CertificatePath] + if certPath == "" { + return ClientCertificateConfig{}, errors.New("missing certificate path") + } + certPwd := settings.Values[CertificatePassword] + clientID, tenantID := settings.getClientAndTenant() + config := NewClientCertificateConfig(certPath, certPwd, clientID, tenantID) + config.AADEndpoint = settings.Environment.ActiveDirectoryEndpoint + config.Resource = settings.Values[Resource] + return config, nil +} + +// GetUsernamePassword creates a config object from the available username/password credentials. +// An error is returned if no username/password credentials are available. 
+func (settings EnvironmentSettings) GetUsernamePassword() (UsernamePasswordConfig, error) { + username := settings.Values[Username] + password := settings.Values[Password] + if username == "" || password == "" { + return UsernamePasswordConfig{}, errors.New("missing username/password") + } + clientID, tenantID := settings.getClientAndTenant() + config := NewUsernamePasswordConfig(username, password, clientID, tenantID) + config.AADEndpoint = settings.Environment.ActiveDirectoryEndpoint + config.Resource = settings.Values[Resource] + return config, nil +} + +// GetMSI creates a MSI config object from the available client ID. +func (settings EnvironmentSettings) GetMSI() MSIConfig { + config := NewMSIConfig() + config.Resource = settings.Values[Resource] + config.ClientID = settings.Values[ClientID] + return config +} + +// GetDeviceFlow creates a device-flow config object from the available client and tenant IDs. +func (settings EnvironmentSettings) GetDeviceFlow() DeviceFlowConfig { + clientID, tenantID := settings.getClientAndTenant() + config := NewDeviceFlowConfig(clientID, tenantID) + config.AADEndpoint = settings.Environment.ActiveDirectoryEndpoint + config.Resource = settings.Values[Resource] + return config +} + +// GetAuthorizer creates an Authorizer configured from environment variables in the order: +// 1. Client credentials +// 2. Client certificate +// 3. Username password +// 4. MSI +func (settings EnvironmentSettings) GetAuthorizer() (autorest.Authorizer, error) { + //1.Client Credentials + if c, e := settings.GetClientCredentials(); e == nil { + return c.Authorizer() + } + + //2. Client Certificate + if c, e := settings.GetClientCertificate(); e == nil { + return c.Authorizer() + } + + //3. Username Password + if c, e := settings.GetUsernamePassword(); e == nil { + return c.Authorizer() + } + + // 4. 
MSI + return settings.GetMSI().Authorizer() +} + +// NewAuthorizerFromFile creates an Authorizer configured from a configuration file in the following order. +// 1. Client credentials +// 2. Client certificate +func NewAuthorizerFromFile(baseURI string) (autorest.Authorizer, error) { + settings, err := GetSettingsFromFile() + if err != nil { + return nil, err + } + if a, err := settings.ClientCredentialsAuthorizer(baseURI); err == nil { + return a, err + } + if a, err := settings.ClientCertificateAuthorizer(baseURI); err == nil { + return a, err + } + return nil, errors.New("auth file missing client and certificate credentials") +} + +// NewAuthorizerFromFileWithResource creates an Authorizer configured from a configuration file in the following order. +// 1. Client credentials +// 2. Client certificate +func NewAuthorizerFromFileWithResource(resource string) (autorest.Authorizer, error) { + s, err := GetSettingsFromFile() + if err != nil { + return nil, err + } + if a, err := s.ClientCredentialsAuthorizerWithResource(resource); err == nil { + return a, err + } + if a, err := s.ClientCertificateAuthorizerWithResource(resource); err == nil { + return a, err + } + return nil, errors.New("auth file missing client and certificate credentials") +} + +// NewAuthorizerFromCLI creates an Authorizer configured from Azure CLI 2.0 for local development scenarios. +func NewAuthorizerFromCLI() (autorest.Authorizer, error) { + settings, err := GetSettingsFromEnvironment() + if err != nil { + return nil, err + } + + if settings.Values[Resource] == "" { + settings.Values[Resource] = settings.Environment.ResourceManagerEndpoint + } + + return NewAuthorizerFromCLIWithResource(settings.Values[Resource]) +} + +// NewAuthorizerFromCLIWithResource creates an Authorizer configured from Azure CLI 2.0 for local development scenarios. 
+func NewAuthorizerFromCLIWithResource(resource string) (autorest.Authorizer, error) { + token, err := cli.GetTokenFromCLI(resource) + if err != nil { + return nil, err + } + + adalToken, err := token.ToADALToken() + if err != nil { + return nil, err + } + + return autorest.NewBearerAuthorizer(&adalToken), nil +} + +// GetSettingsFromFile returns the available authentication settings from an Azure CLI authentication file. +func GetSettingsFromFile() (FileSettings, error) { + s := FileSettings{} + fileLocation := os.Getenv("AZURE_AUTH_LOCATION") + if fileLocation == "" { + return s, errors.New("environment variable AZURE_AUTH_LOCATION is not set") + } + + contents, err := ioutil.ReadFile(fileLocation) + if err != nil { + return s, err + } + + // Auth file might be encoded + decoded, err := decode(contents) + if err != nil { + return s, err + } + + authFile := map[string]interface{}{} + err = json.Unmarshal(decoded, &authFile) + if err != nil { + return s, err + } + + s.Values = map[string]string{} + s.setKeyValue(ClientID, authFile["clientId"]) + s.setKeyValue(ClientSecret, authFile["clientSecret"]) + s.setKeyValue(CertificatePath, authFile["clientCertificate"]) + s.setKeyValue(CertificatePassword, authFile["clientCertificatePassword"]) + s.setKeyValue(SubscriptionID, authFile["subscriptionId"]) + s.setKeyValue(TenantID, authFile["tenantId"]) + s.setKeyValue(ActiveDirectoryEndpoint, authFile["activeDirectoryEndpointUrl"]) + s.setKeyValue(ResourceManagerEndpoint, authFile["resourceManagerEndpointUrl"]) + s.setKeyValue(GraphResourceID, authFile["activeDirectoryGraphResourceId"]) + s.setKeyValue(SQLManagementEndpoint, authFile["sqlManagementEndpointUrl"]) + s.setKeyValue(GalleryEndpoint, authFile["galleryEndpointUrl"]) + s.setKeyValue(ManagementEndpoint, authFile["managementEndpointUrl"]) + return s, nil +} + +// FileSettings contains the available authentication settings. 
+type FileSettings struct { + Values map[string]string +} + +// GetSubscriptionID returns the available subscription ID or an empty string. +func (settings FileSettings) GetSubscriptionID() string { + return settings.Values[SubscriptionID] +} + +// adds the specified value to the Values map if it isn't nil +func (settings FileSettings) setKeyValue(key string, val interface{}) { + if val != nil { + settings.Values[key] = val.(string) + } +} + +// returns the specified AAD endpoint or the public cloud endpoint if unspecified +func (settings FileSettings) getAADEndpoint() string { + if v, ok := settings.Values[ActiveDirectoryEndpoint]; ok { + return v + } + return azure.PublicCloud.ActiveDirectoryEndpoint +} + +// ServicePrincipalTokenFromClientCredentials creates a ServicePrincipalToken from the available client credentials. +func (settings FileSettings) ServicePrincipalTokenFromClientCredentials(baseURI string) (*adal.ServicePrincipalToken, error) { + resource, err := settings.getResourceForToken(baseURI) + if err != nil { + return nil, err + } + return settings.ServicePrincipalTokenFromClientCredentialsWithResource(resource) +} + +// ClientCredentialsAuthorizer creates an authorizer from the available client credentials. +func (settings FileSettings) ClientCredentialsAuthorizer(baseURI string) (autorest.Authorizer, error) { + resource, err := settings.getResourceForToken(baseURI) + if err != nil { + return nil, err + } + return settings.ClientCredentialsAuthorizerWithResource(resource) +} + +// ServicePrincipalTokenFromClientCredentialsWithResource creates a ServicePrincipalToken +// from the available client credentials and the specified resource. 
+func (settings FileSettings) ServicePrincipalTokenFromClientCredentialsWithResource(resource string) (*adal.ServicePrincipalToken, error) { + if _, ok := settings.Values[ClientSecret]; !ok { + return nil, errors.New("missing client secret") + } + config, err := adal.NewOAuthConfig(settings.getAADEndpoint(), settings.Values[TenantID]) + if err != nil { + return nil, err + } + return adal.NewServicePrincipalToken(*config, settings.Values[ClientID], settings.Values[ClientSecret], resource) +} + +func (settings FileSettings) clientCertificateConfigWithResource(resource string) (ClientCertificateConfig, error) { + if _, ok := settings.Values[CertificatePath]; !ok { + return ClientCertificateConfig{}, errors.New("missing certificate path") + } + cfg := NewClientCertificateConfig(settings.Values[CertificatePath], settings.Values[CertificatePassword], settings.Values[ClientID], settings.Values[TenantID]) + cfg.AADEndpoint = settings.getAADEndpoint() + cfg.Resource = resource + return cfg, nil +} + +// ClientCredentialsAuthorizerWithResource creates an authorizer from the available client credentials and the specified resource. +func (settings FileSettings) ClientCredentialsAuthorizerWithResource(resource string) (autorest.Authorizer, error) { + spToken, err := settings.ServicePrincipalTokenFromClientCredentialsWithResource(resource) + if err != nil { + return nil, err + } + return autorest.NewBearerAuthorizer(spToken), nil +} + +// ServicePrincipalTokenFromClientCertificate creates a ServicePrincipalToken from the available certificate credentials. +func (settings FileSettings) ServicePrincipalTokenFromClientCertificate(baseURI string) (*adal.ServicePrincipalToken, error) { + resource, err := settings.getResourceForToken(baseURI) + if err != nil { + return nil, err + } + return settings.ServicePrincipalTokenFromClientCertificateWithResource(resource) +} + +// ClientCertificateAuthorizer creates an authorizer from the available certificate credentials. 
+func (settings FileSettings) ClientCertificateAuthorizer(baseURI string) (autorest.Authorizer, error) {
+	resource, err := settings.getResourceForToken(baseURI)
+	if err != nil {
+		return nil, err
+	}
+	return settings.ClientCertificateAuthorizerWithResource(resource)
+}
+
+// ServicePrincipalTokenFromClientCertificateWithResource creates a ServicePrincipalToken from the available certificate credentials.
+func (settings FileSettings) ServicePrincipalTokenFromClientCertificateWithResource(resource string) (*adal.ServicePrincipalToken, error) {
+	cfg, err := settings.clientCertificateConfigWithResource(resource)
+	if err != nil {
+		return nil, err
+	}
+	return cfg.ServicePrincipalToken()
+}
+
+// ClientCertificateAuthorizerWithResource creates an authorizer from the available certificate credentials and the specified resource.
+func (settings FileSettings) ClientCertificateAuthorizerWithResource(resource string) (autorest.Authorizer, error) {
+	cfg, err := settings.clientCertificateConfigWithResource(resource)
+	if err != nil {
+		return nil, err
+	}
+	return cfg.Authorizer()
+}
+
+func decode(b []byte) ([]byte, error) {
+	reader, enc := utfbom.Skip(bytes.NewReader(b))
+
+	switch enc {
+	case utfbom.UTF16LittleEndian:
+		u16 := make([]uint16, (len(b)/2)-1)
+		err := binary.Read(reader, binary.LittleEndian, &u16)
+		if err != nil {
+			return nil, err
+		}
+		return []byte(string(utf16.Decode(u16))), nil
+	case utfbom.UTF16BigEndian:
+		u16 := make([]uint16, (len(b)/2)-1)
+		err := binary.Read(reader, binary.BigEndian, &u16)
+		if err != nil {
+			return nil, err
+		}
+		return []byte(string(utf16.Decode(u16))), nil
+	}
+	return ioutil.ReadAll(reader)
+}
+
+func (settings FileSettings) getResourceForToken(baseURI string) (string, error) {
+	// Compare default base URI from the SDK to the endpoints from the public cloud
+	// Base URI and token resource are the same string. This func finds the authentication
+	// file field that matches the SDK base URI.
The SDK defines the public cloud + // endpoint as its default base URI + if !strings.HasSuffix(baseURI, "/") { + baseURI += "/" + } + switch baseURI { + case azure.PublicCloud.ServiceManagementEndpoint: + return settings.Values[ManagementEndpoint], nil + case azure.PublicCloud.ResourceManagerEndpoint: + return settings.Values[ResourceManagerEndpoint], nil + case azure.PublicCloud.ActiveDirectoryEndpoint: + return settings.Values[ActiveDirectoryEndpoint], nil + case azure.PublicCloud.GalleryEndpoint: + return settings.Values[GalleryEndpoint], nil + case azure.PublicCloud.GraphEndpoint: + return settings.Values[GraphResourceID], nil + } + return "", fmt.Errorf("auth: base URI not found in endpoints") +} + +// NewClientCredentialsConfig creates an AuthorizerConfig object configured to obtain an Authorizer through Client Credentials. +// Defaults to Public Cloud and Resource Manager Endpoint. +func NewClientCredentialsConfig(clientID string, clientSecret string, tenantID string) ClientCredentialsConfig { + return ClientCredentialsConfig{ + ClientID: clientID, + ClientSecret: clientSecret, + TenantID: tenantID, + Resource: azure.PublicCloud.ResourceManagerEndpoint, + AADEndpoint: azure.PublicCloud.ActiveDirectoryEndpoint, + } +} + +// NewClientCertificateConfig creates a ClientCertificateConfig object configured to obtain an Authorizer through client certificate. +// Defaults to Public Cloud and Resource Manager Endpoint. 
+func NewClientCertificateConfig(certificatePath string, certificatePassword string, clientID string, tenantID string) ClientCertificateConfig { + return ClientCertificateConfig{ + CertificatePath: certificatePath, + CertificatePassword: certificatePassword, + ClientID: clientID, + TenantID: tenantID, + Resource: azure.PublicCloud.ResourceManagerEndpoint, + AADEndpoint: azure.PublicCloud.ActiveDirectoryEndpoint, + } +} + +// NewUsernamePasswordConfig creates an UsernamePasswordConfig object configured to obtain an Authorizer through username and password. +// Defaults to Public Cloud and Resource Manager Endpoint. +func NewUsernamePasswordConfig(username string, password string, clientID string, tenantID string) UsernamePasswordConfig { + return UsernamePasswordConfig{ + Username: username, + Password: password, + ClientID: clientID, + TenantID: tenantID, + Resource: azure.PublicCloud.ResourceManagerEndpoint, + AADEndpoint: azure.PublicCloud.ActiveDirectoryEndpoint, + } +} + +// NewMSIConfig creates an MSIConfig object configured to obtain an Authorizer through MSI. +func NewMSIConfig() MSIConfig { + return MSIConfig{ + Resource: azure.PublicCloud.ResourceManagerEndpoint, + } +} + +// NewDeviceFlowConfig creates a DeviceFlowConfig object configured to obtain an Authorizer through device flow. +// Defaults to Public Cloud and Resource Manager Endpoint. +func NewDeviceFlowConfig(clientID string, tenantID string) DeviceFlowConfig { + return DeviceFlowConfig{ + ClientID: clientID, + TenantID: tenantID, + Resource: azure.PublicCloud.ResourceManagerEndpoint, + AADEndpoint: azure.PublicCloud.ActiveDirectoryEndpoint, + } +} + +//AuthorizerConfig provides an authorizer from the configuration provided. +type AuthorizerConfig interface { + Authorizer() (autorest.Authorizer, error) +} + +// ClientCredentialsConfig provides the options to get a bearer authorizer from client credentials. 
+type ClientCredentialsConfig struct { + ClientID string + ClientSecret string + TenantID string + AuxTenants []string + AADEndpoint string + Resource string +} + +// ServicePrincipalToken creates a ServicePrincipalToken from client credentials. +func (ccc ClientCredentialsConfig) ServicePrincipalToken() (*adal.ServicePrincipalToken, error) { + oauthConfig, err := adal.NewOAuthConfig(ccc.AADEndpoint, ccc.TenantID) + if err != nil { + return nil, err + } + return adal.NewServicePrincipalToken(*oauthConfig, ccc.ClientID, ccc.ClientSecret, ccc.Resource) +} + +// MultiTenantServicePrincipalToken creates a MultiTenantServicePrincipalToken from client credentials. +func (ccc ClientCredentialsConfig) MultiTenantServicePrincipalToken() (*adal.MultiTenantServicePrincipalToken, error) { + oauthConfig, err := adal.NewMultiTenantOAuthConfig(ccc.AADEndpoint, ccc.TenantID, ccc.AuxTenants, adal.OAuthOptions{}) + if err != nil { + return nil, err + } + return adal.NewMultiTenantServicePrincipalToken(oauthConfig, ccc.ClientID, ccc.ClientSecret, ccc.Resource) +} + +// Authorizer gets the authorizer from client credentials. +func (ccc ClientCredentialsConfig) Authorizer() (autorest.Authorizer, error) { + if len(ccc.AuxTenants) == 0 { + spToken, err := ccc.ServicePrincipalToken() + if err != nil { + return nil, fmt.Errorf("failed to get SPT from client credentials: %v", err) + } + return autorest.NewBearerAuthorizer(spToken), nil + } + mtSPT, err := ccc.MultiTenantServicePrincipalToken() + if err != nil { + return nil, fmt.Errorf("failed to get multitenant SPT from client credentials: %v", err) + } + return autorest.NewMultiTenantServicePrincipalTokenAuthorizer(mtSPT), nil +} + +// ClientCertificateConfig provides the options to get a bearer authorizer from a client certificate. 
+type ClientCertificateConfig struct { + ClientID string + CertificatePath string + CertificatePassword string + TenantID string + AADEndpoint string + Resource string +} + +// ServicePrincipalToken creates a ServicePrincipalToken from client certificate. +func (ccc ClientCertificateConfig) ServicePrincipalToken() (*adal.ServicePrincipalToken, error) { + oauthConfig, err := adal.NewOAuthConfig(ccc.AADEndpoint, ccc.TenantID) + if err != nil { + return nil, err + } + certData, err := ioutil.ReadFile(ccc.CertificatePath) + if err != nil { + return nil, fmt.Errorf("failed to read the certificate file (%s): %v", ccc.CertificatePath, err) + } + certificate, rsaPrivateKey, err := decodePkcs12(certData, ccc.CertificatePassword) + if err != nil { + return nil, fmt.Errorf("failed to decode pkcs12 certificate while creating spt: %v", err) + } + return adal.NewServicePrincipalTokenFromCertificate(*oauthConfig, ccc.ClientID, certificate, rsaPrivateKey, ccc.Resource) +} + +// Authorizer gets an authorizer object from client certificate. +func (ccc ClientCertificateConfig) Authorizer() (autorest.Authorizer, error) { + spToken, err := ccc.ServicePrincipalToken() + if err != nil { + return nil, fmt.Errorf("failed to get oauth token from certificate auth: %v", err) + } + return autorest.NewBearerAuthorizer(spToken), nil +} + +// DeviceFlowConfig provides the options to get a bearer authorizer using device flow authentication. +type DeviceFlowConfig struct { + ClientID string + TenantID string + AADEndpoint string + Resource string +} + +// Authorizer gets the authorizer from device flow. +func (dfc DeviceFlowConfig) Authorizer() (autorest.Authorizer, error) { + spToken, err := dfc.ServicePrincipalToken() + if err != nil { + return nil, fmt.Errorf("failed to get oauth token from device flow: %v", err) + } + return autorest.NewBearerAuthorizer(spToken), nil +} + +// ServicePrincipalToken gets the service principal token from device flow. 
+func (dfc DeviceFlowConfig) ServicePrincipalToken() (*adal.ServicePrincipalToken, error) { + oauthConfig, err := adal.NewOAuthConfig(dfc.AADEndpoint, dfc.TenantID) + if err != nil { + return nil, err + } + oauthClient := &autorest.Client{} + deviceCode, err := adal.InitiateDeviceAuth(oauthClient, *oauthConfig, dfc.ClientID, dfc.Resource) + if err != nil { + return nil, fmt.Errorf("failed to start device auth flow: %s", err) + } + log.Println(*deviceCode.Message) + token, err := adal.WaitForUserCompletion(oauthClient, deviceCode) + if err != nil { + return nil, fmt.Errorf("failed to finish device auth flow: %s", err) + } + return adal.NewServicePrincipalTokenFromManualToken(*oauthConfig, dfc.ClientID, dfc.Resource, *token) +} + +func decodePkcs12(pkcs []byte, password string) (*x509.Certificate, *rsa.PrivateKey, error) { + privateKey, certificate, err := pkcs12.Decode(pkcs, password) + if err != nil { + return nil, nil, err + } + + rsaPrivateKey, isRsaKey := privateKey.(*rsa.PrivateKey) + if !isRsaKey { + return nil, nil, fmt.Errorf("PKCS#12 certificate must contain an RSA private key") + } + + return certificate, rsaPrivateKey, nil +} + +// UsernamePasswordConfig provides the options to get a bearer authorizer from a username and a password. +type UsernamePasswordConfig struct { + ClientID string + Username string + Password string + TenantID string + AADEndpoint string + Resource string +} + +// ServicePrincipalToken creates a ServicePrincipalToken from username and password. +func (ups UsernamePasswordConfig) ServicePrincipalToken() (*adal.ServicePrincipalToken, error) { + oauthConfig, err := adal.NewOAuthConfig(ups.AADEndpoint, ups.TenantID) + if err != nil { + return nil, err + } + return adal.NewServicePrincipalTokenFromUsernamePassword(*oauthConfig, ups.ClientID, ups.Username, ups.Password, ups.Resource) +} + +// Authorizer gets the authorizer from a username and a password. 
+func (ups UsernamePasswordConfig) Authorizer() (autorest.Authorizer, error) { + spToken, err := ups.ServicePrincipalToken() + if err != nil { + return nil, fmt.Errorf("failed to get oauth token from username and password auth: %v", err) + } + return autorest.NewBearerAuthorizer(spToken), nil +} + +// MSIConfig provides the options to get a bearer authorizer through MSI. +type MSIConfig struct { + Resource string + ClientID string +} + +// Authorizer gets the authorizer from MSI. +func (mc MSIConfig) Authorizer() (autorest.Authorizer, error) { + msiEndpoint, err := adal.GetMSIEndpoint() + if err != nil { + return nil, err + } + + var spToken *adal.ServicePrincipalToken + if mc.ClientID == "" { + spToken, err = adal.NewServicePrincipalTokenFromMSI(msiEndpoint, mc.Resource) + if err != nil { + return nil, fmt.Errorf("failed to get oauth token from MSI: %v", err) + } + } else { + spToken, err = adal.NewServicePrincipalTokenFromMSIWithUserAssignedID(msiEndpoint, mc.Resource, mc.ClientID) + if err != nil { + return nil, fmt.Errorf("failed to get oauth token from MSI for user assigned identity: %v", err) + } + } + + return autorest.NewBearerAuthorizer(spToken), nil +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/auth/go.mod b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/go.mod new file mode 100644 index 00000000000..e98dfe8623a --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/go.mod @@ -0,0 +1,11 @@ +module github.com/Azure/go-autorest/autorest/azure/auth + +go 1.12 + +require ( + github.com/Azure/go-autorest/autorest v0.9.2 + github.com/Azure/go-autorest/autorest/adal v0.8.0 + github.com/Azure/go-autorest/autorest/azure/cli v0.3.1 + github.com/dimchansky/utfbom v1.1.0 + golang.org/x/crypto v0.0.0-20190418165655-df01cb2cc480 +) diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/auth/go.sum 
b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/go.sum new file mode 100644 index 00000000000..d706fd5836b --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/go.sum @@ -0,0 +1,36 @@ +github.com/Azure/go-autorest/autorest v0.9.0 h1:MRvx8gncNaXJqOoLmhNjUAKh33JJF8LyxPhomEtOsjs= +github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest v0.9.2 h1:6AWuh3uWrsZJcNoCHrCF/+g4aKPCU39kaMO6/qrnK/4= +github.com/Azure/go-autorest/autorest v0.9.2/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest/adal v0.5.0 h1:q2gDruN08/guU9vAjuPWff0+QIrpH6ediguzdAzXAUU= +github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= +github.com/Azure/go-autorest/autorest/adal v0.8.0 h1:CxTzQrySOxDnKpLjFJeZAS5Qrv/qFPkgLjx5bOAi//I= +github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= +github.com/Azure/go-autorest/autorest/azure/cli v0.3.1 h1:LXl088ZQlP0SBppGFsRZonW6hSvwgL5gRByMbvUbx8U= +github.com/Azure/go-autorest/autorest/azure/cli v0.3.1/go.mod h1:ZG5p860J94/0kI9mNJVoIoLgXcirM2gF5i2kWloofxw= +github.com/Azure/go-autorest/autorest/date v0.1.0 h1:YGrhWfrgtFs84+h0o46rJrlmsZtyZRg470CqAXTZaGM= +github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/date v0.2.0 h1:yW+Zlqf26583pE43KhfnhFcdmSWlm5Ew6bxipnr/tbM= +github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= +github.com/Azure/go-autorest/autorest/mocks v0.1.0 h1:Kx+AUU2Te+A3JIyYn6Dfs+cFgx5XorQKuIXrZGoq/SI= +github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= 
+github.com/Azure/go-autorest/autorest/mocks v0.2.0 h1:Ww5g4zThfD/6cLb4z6xxgeyDa7QDkizMkJKe0ysZXp0= +github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.3.0 h1:qJumjCaCudz+OcqE9/XtEPfvtOjOmKaui4EOpFI6zZc= +github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= +github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY= +github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= +github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k= +github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= +github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dimchansky/utfbom v1.1.0 h1:FcM3g+nofKgUteL8dm/UpdRXNC9KmADgTpLKsu0TRo4= +github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190418165655-df01cb2cc480 h1:O5YqonU5IWby+w98jVUG9h7zlCWCcH4RHyPVReBmhzk= +golang.org/x/crypto v0.0.0-20190418165655-df01cb2cc480/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e 
h1:nFYrTHrdrAOpShe27kaFHjsqYSEQ0KWqdWLu3xuZJts= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/auth/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/go_mod_tidy_hack.go new file mode 100644 index 00000000000..2f09cd177aa --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/go_mod_tidy_hack.go @@ -0,0 +1,24 @@ +// +build modhack + +package auth + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file, and the github.com/Azure/go-autorest/autorest import, won't actually become part of +// the resultant binary. + +// Necessary for safely adding multi-module repo. +// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository +import _ "github.com/Azure/go-autorest/autorest" diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go b/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go new file mode 100644 index 00000000000..26be936b7e5 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go @@ -0,0 +1,335 @@ +// Package azure provides Azure-specific implementations used with AutoRest. +// See the included examples for more detail. 
+package azure + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "regexp" + "strconv" + "strings" + + "github.com/Azure/go-autorest/autorest" +) + +const ( + // HeaderClientID is the Azure extension header to set a user-specified request ID. + HeaderClientID = "x-ms-client-request-id" + + // HeaderReturnClientID is the Azure extension header to set if the user-specified request ID + // should be included in the response. + HeaderReturnClientID = "x-ms-return-client-request-id" + + // HeaderRequestID is the Azure extension header of the service generated request ID returned + // in the response. + HeaderRequestID = "x-ms-request-id" +) + +// ServiceError encapsulates the error response from an Azure service. +// It adhears to the OData v4 specification for error responses. 
+type ServiceError struct { + Code string `json:"code"` + Message string `json:"message"` + Target *string `json:"target"` + Details []map[string]interface{} `json:"details"` + InnerError map[string]interface{} `json:"innererror"` + AdditionalInfo []map[string]interface{} `json:"additionalInfo"` +} + +func (se ServiceError) Error() string { + result := fmt.Sprintf("Code=%q Message=%q", se.Code, se.Message) + + if se.Target != nil { + result += fmt.Sprintf(" Target=%q", *se.Target) + } + + if se.Details != nil { + d, err := json.Marshal(se.Details) + if err != nil { + result += fmt.Sprintf(" Details=%v", se.Details) + } + result += fmt.Sprintf(" Details=%v", string(d)) + } + + if se.InnerError != nil { + d, err := json.Marshal(se.InnerError) + if err != nil { + result += fmt.Sprintf(" InnerError=%v", se.InnerError) + } + result += fmt.Sprintf(" InnerError=%v", string(d)) + } + + if se.AdditionalInfo != nil { + d, err := json.Marshal(se.AdditionalInfo) + if err != nil { + result += fmt.Sprintf(" AdditionalInfo=%v", se.AdditionalInfo) + } + result += fmt.Sprintf(" AdditionalInfo=%v", string(d)) + } + + return result +} + +// UnmarshalJSON implements the json.Unmarshaler interface for the ServiceError type. +func (se *ServiceError) UnmarshalJSON(b []byte) error { + // per the OData v4 spec the details field must be an array of JSON objects. + // unfortunately not all services adhear to the spec and just return a single + // object instead of an array with one object. so we have to perform some + // shenanigans to accommodate both cases. 
+ // http://docs.oasis-open.org/odata/odata-json-format/v4.0/os/odata-json-format-v4.0-os.html#_Toc372793091 + + type serviceError1 struct { + Code string `json:"code"` + Message string `json:"message"` + Target *string `json:"target"` + Details []map[string]interface{} `json:"details"` + InnerError map[string]interface{} `json:"innererror"` + AdditionalInfo []map[string]interface{} `json:"additionalInfo"` + } + + type serviceError2 struct { + Code string `json:"code"` + Message string `json:"message"` + Target *string `json:"target"` + Details map[string]interface{} `json:"details"` + InnerError map[string]interface{} `json:"innererror"` + AdditionalInfo []map[string]interface{} `json:"additionalInfo"` + } + + se1 := serviceError1{} + err := json.Unmarshal(b, &se1) + if err == nil { + se.populate(se1.Code, se1.Message, se1.Target, se1.Details, se1.InnerError, se1.AdditionalInfo) + return nil + } + + se2 := serviceError2{} + err = json.Unmarshal(b, &se2) + if err == nil { + se.populate(se2.Code, se2.Message, se2.Target, nil, se2.InnerError, se2.AdditionalInfo) + se.Details = append(se.Details, se2.Details) + return nil + } + return err +} + +func (se *ServiceError) populate(code, message string, target *string, details []map[string]interface{}, inner map[string]interface{}, additional []map[string]interface{}) { + se.Code = code + se.Message = message + se.Target = target + se.Details = details + se.InnerError = inner + se.AdditionalInfo = additional +} + +// RequestError describes an error response returned by Azure service. +type RequestError struct { + autorest.DetailedError + + // The error returned by the Azure service. + ServiceError *ServiceError `json:"error" xml:"Error"` + + // The request id (from the x-ms-request-id-header) of the request. + RequestID string +} + +// Error returns a human-friendly error message from service error. +func (e RequestError) Error() string { + return fmt.Sprintf("autorest/azure: Service returned an error. 
Status=%v %v", + e.StatusCode, e.ServiceError) +} + +// IsAzureError returns true if the passed error is an Azure Service error; false otherwise. +func IsAzureError(e error) bool { + _, ok := e.(*RequestError) + return ok +} + +// Resource contains details about an Azure resource. +type Resource struct { + SubscriptionID string + ResourceGroup string + Provider string + ResourceType string + ResourceName string +} + +// ParseResourceID parses a resource ID into a ResourceDetails struct. +// See https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-group-template-functions-resource#return-value-4. +func ParseResourceID(resourceID string) (Resource, error) { + + const resourceIDPatternText = `(?i)subscriptions/(.+)/resourceGroups/(.+)/providers/(.+?)/(.+?)/(.+)` + resourceIDPattern := regexp.MustCompile(resourceIDPatternText) + match := resourceIDPattern.FindStringSubmatch(resourceID) + + if len(match) == 0 { + return Resource{}, fmt.Errorf("parsing failed for %s. Invalid resource Id format", resourceID) + } + + v := strings.Split(match[5], "/") + resourceName := v[len(v)-1] + + result := Resource{ + SubscriptionID: match[1], + ResourceGroup: match[2], + Provider: match[3], + ResourceType: match[4], + ResourceName: resourceName, + } + + return result, nil +} + +// NewErrorWithError creates a new Error conforming object from the +// passed packageType, method, statusCode of the given resp (UndefinedStatusCode +// if resp is nil), message, and original error. message is treated as a format +// string to which the optional args apply. 
+func NewErrorWithError(original error, packageType string, method string, resp *http.Response, message string, args ...interface{}) RequestError { + if v, ok := original.(*RequestError); ok { + return *v + } + + statusCode := autorest.UndefinedStatusCode + if resp != nil { + statusCode = resp.StatusCode + } + return RequestError{ + DetailedError: autorest.DetailedError{ + Original: original, + PackageType: packageType, + Method: method, + StatusCode: statusCode, + Message: fmt.Sprintf(message, args...), + }, + } +} + +// WithReturningClientID returns a PrepareDecorator that adds an HTTP extension header of +// x-ms-client-request-id whose value is the passed, undecorated UUID (e.g., +// "0F39878C-5F76-4DB8-A25D-61D2C193C3CA"). It also sets the x-ms-return-client-request-id +// header to true such that UUID accompanies the http.Response. +func WithReturningClientID(uuid string) autorest.PrepareDecorator { + preparer := autorest.CreatePreparer( + WithClientID(uuid), + WithReturnClientID(true)) + + return func(p autorest.Preparer) autorest.Preparer { + return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err != nil { + return r, err + } + return preparer.Prepare(r) + }) + } +} + +// WithClientID returns a PrepareDecorator that adds an HTTP extension header of +// x-ms-client-request-id whose value is passed, undecorated UUID (e.g., +// "0F39878C-5F76-4DB8-A25D-61D2C193C3CA"). +func WithClientID(uuid string) autorest.PrepareDecorator { + return autorest.WithHeader(HeaderClientID, uuid) +} + +// WithReturnClientID returns a PrepareDecorator that adds an HTTP extension header of +// x-ms-return-client-request-id whose boolean value indicates if the value of the +// x-ms-client-request-id header should be included in the http.Response. 
+func WithReturnClientID(b bool) autorest.PrepareDecorator { + return autorest.WithHeader(HeaderReturnClientID, strconv.FormatBool(b)) +} + +// ExtractClientID extracts the client identifier from the x-ms-client-request-id header set on the +// http.Request sent to the service (and returned in the http.Response) +func ExtractClientID(resp *http.Response) string { + return autorest.ExtractHeaderValue(HeaderClientID, resp) +} + +// ExtractRequestID extracts the Azure server generated request identifier from the +// x-ms-request-id header. +func ExtractRequestID(resp *http.Response) string { + return autorest.ExtractHeaderValue(HeaderRequestID, resp) +} + +// WithErrorUnlessStatusCode returns a RespondDecorator that emits an +// azure.RequestError by reading the response body unless the response HTTP status code +// is among the set passed. +// +// If there is a chance service may return responses other than the Azure error +// format and the response cannot be parsed into an error, a decoding error will +// be returned containing the response body. In any case, the Responder will +// return an error if the status code is not satisfied. +// +// If this Responder returns an error, the response body will be replaced with +// an in-memory reader, which needs no further closing. +func WithErrorUnlessStatusCode(codes ...int) autorest.RespondDecorator { + return func(r autorest.Responder) autorest.Responder { + return autorest.ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if err == nil && !autorest.ResponseHasStatusCode(resp, codes...) { + var e RequestError + defer resp.Body.Close() + + encodedAs := autorest.EncodedAsJSON + if strings.Contains(resp.Header.Get("Content-Type"), "xml") { + encodedAs = autorest.EncodedAsXML + } + + // Copy and replace the Body in case it does not contain an error object. + // This will leave the Body available to the caller. 
+ b, decodeErr := autorest.CopyAndDecode(encodedAs, resp.Body, &e) + resp.Body = ioutil.NopCloser(&b) + if decodeErr != nil { + return fmt.Errorf("autorest/azure: error response cannot be parsed: %q error: %v", b.String(), decodeErr) + } + if e.ServiceError == nil { + // Check if error is unwrapped ServiceError + decoder := autorest.NewDecoder(encodedAs, bytes.NewReader(b.Bytes())) + if err := decoder.Decode(&e.ServiceError); err != nil { + return err + } + } + if e.ServiceError.Message == "" { + // if we're here it means the returned error wasn't OData v4 compliant. + // try to unmarshal the body in hopes of getting something. + rawBody := map[string]interface{}{} + decoder := autorest.NewDecoder(encodedAs, bytes.NewReader(b.Bytes())) + if err := decoder.Decode(&rawBody); err != nil { + return err + } + + e.ServiceError = &ServiceError{ + Code: "Unknown", + Message: "Unknown service error", + } + if len(rawBody) > 0 { + e.ServiceError.Details = []map[string]interface{}{rawBody} + } + } + e.Response = resp + e.RequestID = ExtractRequestID(resp) + if e.StatusCode == nil { + e.StatusCode = resp.StatusCode + } + err = &e + } + return err + }) + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/cli/LICENSE b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/LICENSE new file mode 100644 index 00000000000..b9d6a27ea92 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/cli/go.mod b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/go.mod new file mode 100644 index 00000000000..a58302914dc --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/go.mod @@ -0,0 +1,11 @@ +module github.com/Azure/go-autorest/autorest/azure/cli + +go 1.12 + +require ( + github.com/Azure/go-autorest/autorest v0.9.0 + github.com/Azure/go-autorest/autorest/adal v0.8.0 + github.com/Azure/go-autorest/autorest/date v0.2.0 + github.com/dimchansky/utfbom v1.1.0 + github.com/mitchellh/go-homedir v1.1.0 +) diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/cli/go.sum b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/go.sum new file mode 100644 index 00000000000..542806b9471 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/go.sum @@ -0,0 +1,29 @@ +github.com/Azure/go-autorest/autorest v0.9.0 h1:MRvx8gncNaXJqOoLmhNjUAKh33JJF8LyxPhomEtOsjs= +github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest/adal v0.5.0 h1:q2gDruN08/guU9vAjuPWff0+QIrpH6ediguzdAzXAUU= +github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod 
h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= +github.com/Azure/go-autorest/autorest/adal v0.8.0 h1:CxTzQrySOxDnKpLjFJeZAS5Qrv/qFPkgLjx5bOAi//I= +github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= +github.com/Azure/go-autorest/autorest/date v0.1.0 h1:YGrhWfrgtFs84+h0o46rJrlmsZtyZRg470CqAXTZaGM= +github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/date v0.2.0 h1:yW+Zlqf26583pE43KhfnhFcdmSWlm5Ew6bxipnr/tbM= +github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= +github.com/Azure/go-autorest/autorest/mocks v0.1.0 h1:Kx+AUU2Te+A3JIyYn6Dfs+cFgx5XorQKuIXrZGoq/SI= +github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.2.0 h1:Ww5g4zThfD/6cLb4z6xxgeyDa7QDkizMkJKe0ysZXp0= +github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.3.0 h1:qJumjCaCudz+OcqE9/XtEPfvtOjOmKaui4EOpFI6zZc= +github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= +github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY= +github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= +github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k= +github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= +github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod 
h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dimchansky/utfbom v1.1.0 h1:FcM3g+nofKgUteL8dm/UpdRXNC9KmADgTpLKsu0TRo4= +github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/cli/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/go_mod_tidy_hack.go new file mode 100644 index 00000000000..618bed392fc --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/go_mod_tidy_hack.go @@ -0,0 +1,24 @@ +// +build modhack + +package cli + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file, and the github.com/Azure/go-autorest/autorest import, won't actually become part of +// the resultant binary. + +// Necessary for safely adding multi-module repo. 
+// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository +import _ "github.com/Azure/go-autorest/autorest" diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/cli/profile.go b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/profile.go new file mode 100644 index 00000000000..f45c3a516d9 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/profile.go @@ -0,0 +1,83 @@ +package cli + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path/filepath" + + "github.com/dimchansky/utfbom" + "github.com/mitchellh/go-homedir" +) + +// Profile represents a Profile from the Azure CLI +type Profile struct { + InstallationID string `json:"installationId"` + Subscriptions []Subscription `json:"subscriptions"` +} + +// Subscription represents a Subscription from the Azure CLI +type Subscription struct { + EnvironmentName string `json:"environmentName"` + ID string `json:"id"` + IsDefault bool `json:"isDefault"` + Name string `json:"name"` + State string `json:"state"` + TenantID string `json:"tenantId"` + User *User `json:"user"` +} + +// User represents a User from the Azure CLI +type User struct { + Name string `json:"name"` + Type string `json:"type"` +} + +const azureProfileJSON = "azureProfile.json" + +func configDir() string { + return os.Getenv("AZURE_CONFIG_DIR") +} + +// ProfilePath returns the path where the Azure Profile is stored from the Azure CLI +func ProfilePath() (string, error) { + if cfgDir := configDir(); cfgDir != "" { + return filepath.Join(cfgDir, azureProfileJSON), nil + } + return homedir.Expand("~/.azure/" + azureProfileJSON) +} + +// LoadProfile restores a Profile object from a file located at 'path'. 
+func LoadProfile(path string) (result Profile, err error) { + var contents []byte + contents, err = ioutil.ReadFile(path) + if err != nil { + err = fmt.Errorf("failed to open file (%s) while loading token: %v", path, err) + return + } + reader := utfbom.SkipOnly(bytes.NewReader(contents)) + + dec := json.NewDecoder(reader) + if err = dec.Decode(&result); err != nil { + err = fmt.Errorf("failed to decode contents of file (%s) into a Profile representation: %v", path, err) + return + } + + return +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/cli/token.go b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/token.go new file mode 100644 index 00000000000..44ff446f669 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/token.go @@ -0,0 +1,175 @@ +package cli + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +import ( + "bytes" + "encoding/json" + "fmt" + "os" + "os/exec" + "path/filepath" + "regexp" + "runtime" + "strconv" + "time" + + "github.com/Azure/go-autorest/autorest/adal" + "github.com/Azure/go-autorest/autorest/date" + "github.com/mitchellh/go-homedir" +) + +// Token represents an AccessToken from the Azure CLI +type Token struct { + AccessToken string `json:"accessToken"` + Authority string `json:"_authority"` + ClientID string `json:"_clientId"` + ExpiresOn string `json:"expiresOn"` + IdentityProvider string `json:"identityProvider"` + IsMRRT bool `json:"isMRRT"` + RefreshToken string `json:"refreshToken"` + Resource string `json:"resource"` + TokenType string `json:"tokenType"` + UserID string `json:"userId"` +} + +const accessTokensJSON = "accessTokens.json" + +// ToADALToken converts an Azure CLI `Token`` to an `adal.Token`` +func (t Token) ToADALToken() (converted adal.Token, err error) { + tokenExpirationDate, err := ParseExpirationDate(t.ExpiresOn) + if err != nil { + err = fmt.Errorf("Error parsing Token Expiration Date %q: %+v", t.ExpiresOn, err) + return + } + + difference := tokenExpirationDate.Sub(date.UnixEpoch()) + + converted = adal.Token{ + AccessToken: t.AccessToken, + Type: t.TokenType, + ExpiresIn: "3600", + ExpiresOn: json.Number(strconv.Itoa(int(difference.Seconds()))), + RefreshToken: t.RefreshToken, + Resource: t.Resource, + } + return +} + +// AccessTokensPath returns the path where access tokens are stored from the Azure CLI +// TODO(#199): add unit test. +func AccessTokensPath() (string, error) { + // Azure-CLI allows user to customize the path of access tokens through environment variable. + if accessTokenPath := os.Getenv("AZURE_ACCESS_TOKEN_FILE"); accessTokenPath != "" { + return accessTokenPath, nil + } + + // Azure-CLI allows user to customize the path to Azure config directory through environment variable. 
+ if cfgDir := configDir(); cfgDir != "" { + return filepath.Join(cfgDir, accessTokensJSON), nil + } + + // Fallback logic to default path on non-cloud-shell environment. + // TODO(#200): remove the dependency on hard-coding path. + return homedir.Expand("~/.azure/" + accessTokensJSON) +} + +// ParseExpirationDate parses either a Azure CLI or CloudShell date into a time object +func ParseExpirationDate(input string) (*time.Time, error) { + // CloudShell (and potentially the Azure CLI in future) + expirationDate, cloudShellErr := time.Parse(time.RFC3339, input) + if cloudShellErr != nil { + // Azure CLI (Python) e.g. 2017-08-31 19:48:57.998857 (plus the local timezone) + const cliFormat = "2006-01-02 15:04:05.999999" + expirationDate, cliErr := time.ParseInLocation(cliFormat, input, time.Local) + if cliErr == nil { + return &expirationDate, nil + } + + return nil, fmt.Errorf("Error parsing expiration date %q.\n\nCloudShell Error: \n%+v\n\nCLI Error:\n%+v", input, cloudShellErr, cliErr) + } + + return &expirationDate, nil +} + +// LoadTokens restores a set of Token objects from a file located at 'path'. +func LoadTokens(path string) ([]Token, error) { + file, err := os.Open(path) + if err != nil { + return nil, fmt.Errorf("failed to open file (%s) while loading token: %v", path, err) + } + defer file.Close() + + var tokens []Token + + dec := json.NewDecoder(file) + if err = dec.Decode(&tokens); err != nil { + return nil, fmt.Errorf("failed to decode contents of file (%s) into a `cli.Token` representation: %v", path, err) + } + + return tokens, nil +} + +// GetTokenFromCLI gets a token using Azure CLI 2.0 for local development scenarios. +func GetTokenFromCLI(resource string) (*Token, error) { + // This is the path that a developer can set to tell this class what the install path for Azure CLI is. + const azureCLIPath = "AzureCLIPath" + + // The default install paths are used to find Azure CLI. 
This is for security, so that any path in the calling program's Path environment is not used to execute Azure CLI. + azureCLIDefaultPathWindows := fmt.Sprintf("%s\\Microsoft SDKs\\Azure\\CLI2\\wbin; %s\\Microsoft SDKs\\Azure\\CLI2\\wbin", os.Getenv("ProgramFiles(x86)"), os.Getenv("ProgramFiles")) + + // Default path for non-Windows. + const azureCLIDefaultPath = "/bin:/sbin:/usr/bin:/usr/local/bin" + + // Validate resource, since it gets sent as a command line argument to Azure CLI + const invalidResourceErrorTemplate = "Resource %s is not in expected format. Only alphanumeric characters, [dot], [colon], [hyphen], and [forward slash] are allowed." + match, err := regexp.MatchString("^[0-9a-zA-Z-.:/]+$", resource) + if err != nil { + return nil, err + } + if !match { + return nil, fmt.Errorf(invalidResourceErrorTemplate, resource) + } + + // Execute Azure CLI to get token + var cliCmd *exec.Cmd + if runtime.GOOS == "windows" { + cliCmd = exec.Command(fmt.Sprintf("%s\\system32\\cmd.exe", os.Getenv("windir"))) + cliCmd.Env = os.Environ() + cliCmd.Env = append(cliCmd.Env, fmt.Sprintf("PATH=%s;%s", os.Getenv(azureCLIPath), azureCLIDefaultPathWindows)) + cliCmd.Args = append(cliCmd.Args, "/c", "az") + } else { + cliCmd = exec.Command("az") + cliCmd.Env = os.Environ() + cliCmd.Env = append(cliCmd.Env, fmt.Sprintf("PATH=%s:%s", os.Getenv(azureCLIPath), azureCLIDefaultPath)) + } + cliCmd.Args = append(cliCmd.Args, "account", "get-access-token", "-o", "json", "--resource", resource) + + var stderr bytes.Buffer + cliCmd.Stderr = &stderr + + output, err := cliCmd.Output() + if err != nil { + return nil, fmt.Errorf("Invoking Azure CLI failed with the following error: %s", stderr.String()) + } + + tokenResponse := Token{} + err = json.Unmarshal(output, &tokenResponse) + if err != nil { + return nil, err + } + + return &tokenResponse, err +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go 
b/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go new file mode 100644 index 00000000000..6c20b8179ab --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go @@ -0,0 +1,244 @@ +package azure + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "strings" +) + +const ( + // EnvironmentFilepathName captures the name of the environment variable containing the path to the file + // to be used while populating the Azure Environment. + EnvironmentFilepathName = "AZURE_ENVIRONMENT_FILEPATH" + + // NotAvailable is used for endpoints and resource IDs that are not available for a given cloud. + NotAvailable = "N/A" +) + +var environments = map[string]Environment{ + "AZURECHINACLOUD": ChinaCloud, + "AZUREGERMANCLOUD": GermanCloud, + "AZUREPUBLICCLOUD": PublicCloud, + "AZUREUSGOVERNMENTCLOUD": USGovernmentCloud, +} + +// ResourceIdentifier contains a set of Azure resource IDs. +type ResourceIdentifier struct { + Graph string `json:"graph"` + KeyVault string `json:"keyVault"` + Datalake string `json:"datalake"` + Batch string `json:"batch"` + OperationalInsights string `json:"operationalInsights"` + Storage string `json:"storage"` +} + +// Environment represents a set of endpoints for each of Azure's Clouds. 
+type Environment struct { + Name string `json:"name"` + ManagementPortalURL string `json:"managementPortalURL"` + PublishSettingsURL string `json:"publishSettingsURL"` + ServiceManagementEndpoint string `json:"serviceManagementEndpoint"` + ResourceManagerEndpoint string `json:"resourceManagerEndpoint"` + ActiveDirectoryEndpoint string `json:"activeDirectoryEndpoint"` + GalleryEndpoint string `json:"galleryEndpoint"` + KeyVaultEndpoint string `json:"keyVaultEndpoint"` + GraphEndpoint string `json:"graphEndpoint"` + ServiceBusEndpoint string `json:"serviceBusEndpoint"` + BatchManagementEndpoint string `json:"batchManagementEndpoint"` + StorageEndpointSuffix string `json:"storageEndpointSuffix"` + SQLDatabaseDNSSuffix string `json:"sqlDatabaseDNSSuffix"` + TrafficManagerDNSSuffix string `json:"trafficManagerDNSSuffix"` + KeyVaultDNSSuffix string `json:"keyVaultDNSSuffix"` + ServiceBusEndpointSuffix string `json:"serviceBusEndpointSuffix"` + ServiceManagementVMDNSSuffix string `json:"serviceManagementVMDNSSuffix"` + ResourceManagerVMDNSSuffix string `json:"resourceManagerVMDNSSuffix"` + ContainerRegistryDNSSuffix string `json:"containerRegistryDNSSuffix"` + CosmosDBDNSSuffix string `json:"cosmosDBDNSSuffix"` + TokenAudience string `json:"tokenAudience"` + ResourceIdentifiers ResourceIdentifier `json:"resourceIdentifiers"` +} + +var ( + // PublicCloud is the default public Azure cloud environment + PublicCloud = Environment{ + Name: "AzurePublicCloud", + ManagementPortalURL: "https://manage.windowsazure.com/", + PublishSettingsURL: "https://manage.windowsazure.com/publishsettings/index", + ServiceManagementEndpoint: "https://management.core.windows.net/", + ResourceManagerEndpoint: "https://management.azure.com/", + ActiveDirectoryEndpoint: "https://login.microsoftonline.com/", + GalleryEndpoint: "https://gallery.azure.com/", + KeyVaultEndpoint: "https://vault.azure.net/", + GraphEndpoint: "https://graph.windows.net/", + ServiceBusEndpoint: 
"https://servicebus.windows.net/", + BatchManagementEndpoint: "https://batch.core.windows.net/", + StorageEndpointSuffix: "core.windows.net", + SQLDatabaseDNSSuffix: "database.windows.net", + TrafficManagerDNSSuffix: "trafficmanager.net", + KeyVaultDNSSuffix: "vault.azure.net", + ServiceBusEndpointSuffix: "servicebus.windows.net", + ServiceManagementVMDNSSuffix: "cloudapp.net", + ResourceManagerVMDNSSuffix: "cloudapp.azure.com", + ContainerRegistryDNSSuffix: "azurecr.io", + CosmosDBDNSSuffix: "documents.azure.com", + TokenAudience: "https://management.azure.com/", + ResourceIdentifiers: ResourceIdentifier{ + Graph: "https://graph.windows.net/", + KeyVault: "https://vault.azure.net", + Datalake: "https://datalake.azure.net/", + Batch: "https://batch.core.windows.net/", + OperationalInsights: "https://api.loganalytics.io", + Storage: "https://storage.azure.com/", + }, + } + + // USGovernmentCloud is the cloud environment for the US Government + USGovernmentCloud = Environment{ + Name: "AzureUSGovernmentCloud", + ManagementPortalURL: "https://manage.windowsazure.us/", + PublishSettingsURL: "https://manage.windowsazure.us/publishsettings/index", + ServiceManagementEndpoint: "https://management.core.usgovcloudapi.net/", + ResourceManagerEndpoint: "https://management.usgovcloudapi.net/", + ActiveDirectoryEndpoint: "https://login.microsoftonline.us/", + GalleryEndpoint: "https://gallery.usgovcloudapi.net/", + KeyVaultEndpoint: "https://vault.usgovcloudapi.net/", + GraphEndpoint: "https://graph.windows.net/", + ServiceBusEndpoint: "https://servicebus.usgovcloudapi.net/", + BatchManagementEndpoint: "https://batch.core.usgovcloudapi.net/", + StorageEndpointSuffix: "core.usgovcloudapi.net", + SQLDatabaseDNSSuffix: "database.usgovcloudapi.net", + TrafficManagerDNSSuffix: "usgovtrafficmanager.net", + KeyVaultDNSSuffix: "vault.usgovcloudapi.net", + ServiceBusEndpointSuffix: "servicebus.usgovcloudapi.net", + ServiceManagementVMDNSSuffix: "usgovcloudapp.net", + 
ResourceManagerVMDNSSuffix: "cloudapp.windowsazure.us", + ContainerRegistryDNSSuffix: "azurecr.us", + CosmosDBDNSSuffix: "documents.azure.us", + TokenAudience: "https://management.usgovcloudapi.net/", + ResourceIdentifiers: ResourceIdentifier{ + Graph: "https://graph.windows.net/", + KeyVault: "https://vault.usgovcloudapi.net", + Datalake: NotAvailable, + Batch: "https://batch.core.usgovcloudapi.net/", + OperationalInsights: "https://api.loganalytics.us", + Storage: "https://storage.azure.com/", + }, + } + + // ChinaCloud is the cloud environment operated in China + ChinaCloud = Environment{ + Name: "AzureChinaCloud", + ManagementPortalURL: "https://manage.chinacloudapi.com/", + PublishSettingsURL: "https://manage.chinacloudapi.com/publishsettings/index", + ServiceManagementEndpoint: "https://management.core.chinacloudapi.cn/", + ResourceManagerEndpoint: "https://management.chinacloudapi.cn/", + ActiveDirectoryEndpoint: "https://login.chinacloudapi.cn/", + GalleryEndpoint: "https://gallery.chinacloudapi.cn/", + KeyVaultEndpoint: "https://vault.azure.cn/", + GraphEndpoint: "https://graph.chinacloudapi.cn/", + ServiceBusEndpoint: "https://servicebus.chinacloudapi.cn/", + BatchManagementEndpoint: "https://batch.chinacloudapi.cn/", + StorageEndpointSuffix: "core.chinacloudapi.cn", + SQLDatabaseDNSSuffix: "database.chinacloudapi.cn", + TrafficManagerDNSSuffix: "trafficmanager.cn", + KeyVaultDNSSuffix: "vault.azure.cn", + ServiceBusEndpointSuffix: "servicebus.chinacloudapi.cn", + ServiceManagementVMDNSSuffix: "chinacloudapp.cn", + ResourceManagerVMDNSSuffix: "cloudapp.azure.cn", + ContainerRegistryDNSSuffix: "azurecr.cn", + CosmosDBDNSSuffix: "documents.azure.cn", + TokenAudience: "https://management.chinacloudapi.cn/", + ResourceIdentifiers: ResourceIdentifier{ + Graph: "https://graph.chinacloudapi.cn/", + KeyVault: "https://vault.azure.cn", + Datalake: NotAvailable, + Batch: "https://batch.chinacloudapi.cn/", + OperationalInsights: NotAvailable, + Storage: 
"https://storage.azure.com/", + }, + } + + // GermanCloud is the cloud environment operated in Germany + GermanCloud = Environment{ + Name: "AzureGermanCloud", + ManagementPortalURL: "http://portal.microsoftazure.de/", + PublishSettingsURL: "https://manage.microsoftazure.de/publishsettings/index", + ServiceManagementEndpoint: "https://management.core.cloudapi.de/", + ResourceManagerEndpoint: "https://management.microsoftazure.de/", + ActiveDirectoryEndpoint: "https://login.microsoftonline.de/", + GalleryEndpoint: "https://gallery.cloudapi.de/", + KeyVaultEndpoint: "https://vault.microsoftazure.de/", + GraphEndpoint: "https://graph.cloudapi.de/", + ServiceBusEndpoint: "https://servicebus.cloudapi.de/", + BatchManagementEndpoint: "https://batch.cloudapi.de/", + StorageEndpointSuffix: "core.cloudapi.de", + SQLDatabaseDNSSuffix: "database.cloudapi.de", + TrafficManagerDNSSuffix: "azuretrafficmanager.de", + KeyVaultDNSSuffix: "vault.microsoftazure.de", + ServiceBusEndpointSuffix: "servicebus.cloudapi.de", + ServiceManagementVMDNSSuffix: "azurecloudapp.de", + ResourceManagerVMDNSSuffix: "cloudapp.microsoftazure.de", + ContainerRegistryDNSSuffix: NotAvailable, + CosmosDBDNSSuffix: "documents.microsoftazure.de", + TokenAudience: "https://management.microsoftazure.de/", + ResourceIdentifiers: ResourceIdentifier{ + Graph: "https://graph.cloudapi.de/", + KeyVault: "https://vault.microsoftazure.de", + Datalake: NotAvailable, + Batch: "https://batch.cloudapi.de/", + OperationalInsights: NotAvailable, + Storage: "https://storage.azure.com/", + }, + } +) + +// EnvironmentFromName returns an Environment based on the common name specified. +func EnvironmentFromName(name string) (Environment, error) { + // IMPORTANT + // As per @radhikagupta5: + // This is technical debt, fundamentally here because Kubernetes is not currently accepting + // contributions to the providers. Once that is an option, the provider should be updated to + // directly call `EnvironmentFromFile`. 
Until then, we rely on dispatching Azure Stack environment creation + // from this method based on the name that is provided to us. + if strings.EqualFold(name, "AZURESTACKCLOUD") { + return EnvironmentFromFile(os.Getenv(EnvironmentFilepathName)) + } + + name = strings.ToUpper(name) + env, ok := environments[name] + if !ok { + return env, fmt.Errorf("autorest/azure: There is no cloud environment matching the name %q", name) + } + + return env, nil +} + +// EnvironmentFromFile loads an Environment from a configuration file available on disk. +// This function is particularly useful in the Hybrid Cloud model, where one must define their own +// endpoints. +func EnvironmentFromFile(location string) (unmarshaled Environment, err error) { + fileContents, err := ioutil.ReadFile(location) + if err != nil { + return + } + + err = json.Unmarshal(fileContents, &unmarshaled) + + return +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/metadata_environment.go b/vendor/github.com/Azure/go-autorest/autorest/azure/metadata_environment.go new file mode 100644 index 00000000000..507f9e95cf1 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/metadata_environment.go @@ -0,0 +1,245 @@ +package azure + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + + "github.com/Azure/go-autorest/autorest" +) + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +type audience []string + +type authentication struct { + LoginEndpoint string `json:"loginEndpoint"` + Audiences audience `json:"audiences"` +} + +type environmentMetadataInfo struct { + GalleryEndpoint string `json:"galleryEndpoint"` + GraphEndpoint string `json:"graphEndpoint"` + PortalEndpoint string `json:"portalEndpoint"` + Authentication authentication `json:"authentication"` +} + +// EnvironmentProperty represent property names that clients can override +type EnvironmentProperty string + +const ( + // EnvironmentName ... + EnvironmentName EnvironmentProperty = "name" + // EnvironmentManagementPortalURL .. + EnvironmentManagementPortalURL EnvironmentProperty = "managementPortalURL" + // EnvironmentPublishSettingsURL ... + EnvironmentPublishSettingsURL EnvironmentProperty = "publishSettingsURL" + // EnvironmentServiceManagementEndpoint ... + EnvironmentServiceManagementEndpoint EnvironmentProperty = "serviceManagementEndpoint" + // EnvironmentResourceManagerEndpoint ... + EnvironmentResourceManagerEndpoint EnvironmentProperty = "resourceManagerEndpoint" + // EnvironmentActiveDirectoryEndpoint ... + EnvironmentActiveDirectoryEndpoint EnvironmentProperty = "activeDirectoryEndpoint" + // EnvironmentGalleryEndpoint ... + EnvironmentGalleryEndpoint EnvironmentProperty = "galleryEndpoint" + // EnvironmentKeyVaultEndpoint ... + EnvironmentKeyVaultEndpoint EnvironmentProperty = "keyVaultEndpoint" + // EnvironmentGraphEndpoint ... + EnvironmentGraphEndpoint EnvironmentProperty = "graphEndpoint" + // EnvironmentServiceBusEndpoint ... + EnvironmentServiceBusEndpoint EnvironmentProperty = "serviceBusEndpoint" + // EnvironmentBatchManagementEndpoint ... + EnvironmentBatchManagementEndpoint EnvironmentProperty = "batchManagementEndpoint" + // EnvironmentStorageEndpointSuffix ... 
+ EnvironmentStorageEndpointSuffix EnvironmentProperty = "storageEndpointSuffix" + // EnvironmentSQLDatabaseDNSSuffix ... + EnvironmentSQLDatabaseDNSSuffix EnvironmentProperty = "sqlDatabaseDNSSuffix" + // EnvironmentTrafficManagerDNSSuffix ... + EnvironmentTrafficManagerDNSSuffix EnvironmentProperty = "trafficManagerDNSSuffix" + // EnvironmentKeyVaultDNSSuffix ... + EnvironmentKeyVaultDNSSuffix EnvironmentProperty = "keyVaultDNSSuffix" + // EnvironmentServiceBusEndpointSuffix ... + EnvironmentServiceBusEndpointSuffix EnvironmentProperty = "serviceBusEndpointSuffix" + // EnvironmentServiceManagementVMDNSSuffix ... + EnvironmentServiceManagementVMDNSSuffix EnvironmentProperty = "serviceManagementVMDNSSuffix" + // EnvironmentResourceManagerVMDNSSuffix ... + EnvironmentResourceManagerVMDNSSuffix EnvironmentProperty = "resourceManagerVMDNSSuffix" + // EnvironmentContainerRegistryDNSSuffix ... + EnvironmentContainerRegistryDNSSuffix EnvironmentProperty = "containerRegistryDNSSuffix" + // EnvironmentTokenAudience ... + EnvironmentTokenAudience EnvironmentProperty = "tokenAudience" +) + +// OverrideProperty represents property name and value that clients can override +type OverrideProperty struct { + Key EnvironmentProperty + Value string +} + +// EnvironmentFromURL loads an Environment from a URL +// This function is particularly useful in the Hybrid Cloud model, where one may define their own +// endpoints. 
+func EnvironmentFromURL(resourceManagerEndpoint string, properties ...OverrideProperty) (environment Environment, err error) { + var metadataEnvProperties environmentMetadataInfo + + if resourceManagerEndpoint == "" { + return environment, fmt.Errorf("Metadata resource manager endpoint is empty") + } + + if metadataEnvProperties, err = retrieveMetadataEnvironment(resourceManagerEndpoint); err != nil { + return environment, err + } + + // Give priority to user's override values + overrideProperties(&environment, properties) + + if environment.Name == "" { + environment.Name = "HybridEnvironment" + } + stampDNSSuffix := environment.StorageEndpointSuffix + if stampDNSSuffix == "" { + stampDNSSuffix = strings.TrimSuffix(strings.TrimPrefix(strings.Replace(resourceManagerEndpoint, strings.Split(resourceManagerEndpoint, ".")[0], "", 1), "."), "/") + environment.StorageEndpointSuffix = stampDNSSuffix + } + if environment.KeyVaultDNSSuffix == "" { + environment.KeyVaultDNSSuffix = fmt.Sprintf("%s.%s", "vault", stampDNSSuffix) + } + if environment.KeyVaultEndpoint == "" { + environment.KeyVaultEndpoint = fmt.Sprintf("%s%s", "https://", environment.KeyVaultDNSSuffix) + } + if environment.TokenAudience == "" { + environment.TokenAudience = metadataEnvProperties.Authentication.Audiences[0] + } + if environment.ActiveDirectoryEndpoint == "" { + environment.ActiveDirectoryEndpoint = metadataEnvProperties.Authentication.LoginEndpoint + } + if environment.ResourceManagerEndpoint == "" { + environment.ResourceManagerEndpoint = resourceManagerEndpoint + } + if environment.GalleryEndpoint == "" { + environment.GalleryEndpoint = metadataEnvProperties.GalleryEndpoint + } + if environment.GraphEndpoint == "" { + environment.GraphEndpoint = metadataEnvProperties.GraphEndpoint + } + + return environment, nil +} + +func overrideProperties(environment *Environment, properties []OverrideProperty) { + for _, property := range properties { + switch property.Key { + case EnvironmentName: + { + 
environment.Name = property.Value + } + case EnvironmentManagementPortalURL: + { + environment.ManagementPortalURL = property.Value + } + case EnvironmentPublishSettingsURL: + { + environment.PublishSettingsURL = property.Value + } + case EnvironmentServiceManagementEndpoint: + { + environment.ServiceManagementEndpoint = property.Value + } + case EnvironmentResourceManagerEndpoint: + { + environment.ResourceManagerEndpoint = property.Value + } + case EnvironmentActiveDirectoryEndpoint: + { + environment.ActiveDirectoryEndpoint = property.Value + } + case EnvironmentGalleryEndpoint: + { + environment.GalleryEndpoint = property.Value + } + case EnvironmentKeyVaultEndpoint: + { + environment.KeyVaultEndpoint = property.Value + } + case EnvironmentGraphEndpoint: + { + environment.GraphEndpoint = property.Value + } + case EnvironmentServiceBusEndpoint: + { + environment.ServiceBusEndpoint = property.Value + } + case EnvironmentBatchManagementEndpoint: + { + environment.BatchManagementEndpoint = property.Value + } + case EnvironmentStorageEndpointSuffix: + { + environment.StorageEndpointSuffix = property.Value + } + case EnvironmentSQLDatabaseDNSSuffix: + { + environment.SQLDatabaseDNSSuffix = property.Value + } + case EnvironmentTrafficManagerDNSSuffix: + { + environment.TrafficManagerDNSSuffix = property.Value + } + case EnvironmentKeyVaultDNSSuffix: + { + environment.KeyVaultDNSSuffix = property.Value + } + case EnvironmentServiceBusEndpointSuffix: + { + environment.ServiceBusEndpointSuffix = property.Value + } + case EnvironmentServiceManagementVMDNSSuffix: + { + environment.ServiceManagementVMDNSSuffix = property.Value + } + case EnvironmentResourceManagerVMDNSSuffix: + { + environment.ResourceManagerVMDNSSuffix = property.Value + } + case EnvironmentContainerRegistryDNSSuffix: + { + environment.ContainerRegistryDNSSuffix = property.Value + } + case EnvironmentTokenAudience: + { + environment.TokenAudience = property.Value + } + } + } +} + +func 
retrieveMetadataEnvironment(endpoint string) (environment environmentMetadataInfo, err error) { + client := autorest.NewClientWithUserAgent("") + managementEndpoint := fmt.Sprintf("%s%s", strings.TrimSuffix(endpoint, "/"), "/metadata/endpoints?api-version=1.0") + req, _ := http.NewRequest("GET", managementEndpoint, nil) + response, err := client.Do(req) + if err != nil { + return environment, err + } + defer response.Body.Close() + jsonResponse, err := ioutil.ReadAll(response.Body) + if err != nil { + return environment, err + } + err = json.Unmarshal(jsonResponse, &environment) + return environment, err +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go b/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go new file mode 100644 index 00000000000..c6d39f68665 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go @@ -0,0 +1,204 @@ +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package azure + +import ( + "errors" + "fmt" + "net/http" + "net/url" + "strings" + "time" + + "github.com/Azure/go-autorest/autorest" +) + +// DoRetryWithRegistration tries to register the resource provider in case it is unregistered. 
+// It also handles request retries +func DoRetryWithRegistration(client autorest.Client) autorest.SendDecorator { + return func(s autorest.Sender) autorest.Sender { + return autorest.SenderFunc(func(r *http.Request) (resp *http.Response, err error) { + rr := autorest.NewRetriableRequest(r) + for currentAttempt := 0; currentAttempt < client.RetryAttempts; currentAttempt++ { + err = rr.Prepare() + if err != nil { + return resp, err + } + + resp, err = autorest.SendWithSender(s, rr.Request(), + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...), + ) + if err != nil { + return resp, err + } + + if resp.StatusCode != http.StatusConflict || client.SkipResourceProviderRegistration { + return resp, err + } + + var re RequestError + if strings.Contains(r.Header.Get("Content-Type"), "xml") { + // XML errors (e.g. Storage Data Plane) only return the inner object + err = autorest.Respond(resp, autorest.ByUnmarshallingXML(&re.ServiceError)) + } else { + err = autorest.Respond(resp, autorest.ByUnmarshallingJSON(&re)) + } + + if err != nil { + return resp, err + } + err = re + + if re.ServiceError != nil && re.ServiceError.Code == "MissingSubscriptionRegistration" { + regErr := register(client, r, re) + if regErr != nil { + return resp, fmt.Errorf("failed auto registering Resource Provider: %s. 
Original error: %s", regErr, err)
+					}
+				}
+			}
+			return resp, err
+		})
+	}
+}
+
+func getProvider(re RequestError) (string, error) {
+	if re.ServiceError != nil && len(re.ServiceError.Details) > 0 {
+		return re.ServiceError.Details[0]["target"].(string), nil
+	}
+	return "", errors.New("provider was not found in the response")
+}
+
+func register(client autorest.Client, originalReq *http.Request, re RequestError) error {
+	subID := getSubscription(originalReq.URL.Path)
+	if subID == "" {
+		return errors.New("missing parameter subscriptionID to register resource provider")
+	}
+	providerName, err := getProvider(re)
+	if err != nil {
+		return fmt.Errorf("missing parameter provider to register resource provider: %s", err)
+	}
+	newURL := url.URL{
+		Scheme: originalReq.URL.Scheme,
+		Host:   originalReq.URL.Host,
+	}
+
+	// taken from the resources SDK
+	// with almost identical code, these sections are easier to maintain
+	// It is also not a good idea to import the SDK here
+	// https://github.com/Azure/azure-sdk-for-go/blob/9f366792afa3e0ddaecdc860e793ba9d75e76c27/arm/resources/resources/providers.go#L252
+	pathParameters := map[string]interface{}{
+		"resourceProviderNamespace": autorest.Encode("path", providerName),
+		"subscriptionId":            autorest.Encode("path", subID),
+	}
+
+	const APIVersion = "2016-09-01"
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsPost(),
+		autorest.WithBaseURL(newURL.String()),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}/register", pathParameters),
+		autorest.WithQueryParameters(queryParameters),
+	)
+
+	req, err := preparer.Prepare(&http.Request{})
+	if err != nil {
+		return err
+	}
+	req = req.WithContext(originalReq.Context())
+
+	resp, err := autorest.SendWithSender(client, req,
+		autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...),
+	)
+	if 
err != nil { + return err + } + + type Provider struct { + RegistrationState *string `json:"registrationState,omitempty"` + } + var provider Provider + + err = autorest.Respond( + resp, + WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&provider), + autorest.ByClosing(), + ) + if err != nil { + return err + } + + // poll for registered provisioning state + registrationStartTime := time.Now() + for err == nil && (client.PollingDuration == 0 || (client.PollingDuration != 0 && time.Since(registrationStartTime) < client.PollingDuration)) { + // taken from the resources SDK + // https://github.com/Azure/azure-sdk-for-go/blob/9f366792afa3e0ddaecdc860e793ba9d75e76c27/arm/resources/resources/providers.go#L45 + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(newURL.String()), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}", pathParameters), + autorest.WithQueryParameters(queryParameters), + ) + req, err = preparer.Prepare(&http.Request{}) + if err != nil { + return err + } + req = req.WithContext(originalReq.Context()) + + resp, err := autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...), + ) + if err != nil { + return err + } + + err = autorest.Respond( + resp, + WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&provider), + autorest.ByClosing(), + ) + if err != nil { + return err + } + + if provider.RegistrationState != nil && + *provider.RegistrationState == "Registered" { + break + } + + delayed := autorest.DelayWithRetryAfter(resp, originalReq.Context().Done()) + if !delayed && !autorest.DelayForBackoff(client.PollingDelay, 0, originalReq.Context().Done()) { + return originalReq.Context().Err() + } + } + if client.PollingDuration != 0 && !(time.Since(registrationStartTime) < client.PollingDuration) { + return errors.New("polling for 
resource provider registration has exceeded the polling duration") + } + return err +} + +func getSubscription(path string) string { + parts := strings.Split(path, "/") + for i, v := range parts { + if v == "subscriptions" && (i+1) < len(parts) { + return parts[i+1] + } + } + return "" +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/client.go b/vendor/github.com/Azure/go-autorest/autorest/client.go new file mode 100644 index 00000000000..1c6a0617a1f --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/client.go @@ -0,0 +1,300 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "crypto/tls" + "fmt" + "io" + "io/ioutil" + "log" + "net/http" + "strings" + "time" + + "github.com/Azure/go-autorest/logger" +) + +const ( + // DefaultPollingDelay is a reasonable delay between polling requests. + DefaultPollingDelay = 60 * time.Second + + // DefaultPollingDuration is a reasonable total polling duration. + DefaultPollingDuration = 15 * time.Minute + + // DefaultRetryAttempts is number of attempts for retry status codes (5xx). + DefaultRetryAttempts = 3 + + // DefaultRetryDuration is the duration to wait between retries. 
+	DefaultRetryDuration = 30 * time.Second
+)
+
+var (
+	// StatusCodesForRetry are a defined group of status codes for which the client will retry
+	StatusCodesForRetry = []int{
+		http.StatusRequestTimeout,      // 408
+		http.StatusTooManyRequests,     // 429
+		http.StatusInternalServerError, // 500
+		http.StatusBadGateway,          // 502
+		http.StatusServiceUnavailable,  // 503
+		http.StatusGatewayTimeout,      // 504
+	}
+)
+
+const (
+	requestFormat = `HTTP Request Begin ===================================================
+%s
+===================================================== HTTP Request End
+`
+	responseFormat = `HTTP Response Begin ===================================================
+%s
+===================================================== HTTP Response End
+`
+)
+
+// Response serves as the base for all responses from generated clients. It provides access to the
+// last http.Response.
+type Response struct {
+	*http.Response `json:"-"`
+}
+
+// IsHTTPStatus returns true if the returned HTTP status code matches the provided status code.
+// If there was no response (i.e. the underlying http.Response is nil) the return value is false.
+func (r Response) IsHTTPStatus(statusCode int) bool {
+	if r.Response == nil {
+		return false
+	}
+	return r.Response.StatusCode == statusCode
+}
+
+// HasHTTPStatus returns true if the returned HTTP status code matches one of the provided status codes.
+// If there was no response (i.e. the underlying http.Response is nil) or no status codes are provided
+// the return value is false.
+func (r Response) HasHTTPStatus(statusCodes ...int) bool {
+	return ResponseHasStatusCode(r.Response, statusCodes...)
+}
+
+// LoggingInspector implements request and response inspectors that log the full request and
+// response to a supplied log.
+type LoggingInspector struct {
+	Logger *log.Logger
+}
+
+// WithInspection returns a PrepareDecorator that emits the http.Request to the supplied logger. The
+// body is restored after being emitted. 
+// +// Note: Since it reads the entire Body, this decorator should not be used where body streaming is +// important. It is best used to trace JSON or similar body values. +func (li LoggingInspector) WithInspection() PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + var body, b bytes.Buffer + + defer r.Body.Close() + + r.Body = ioutil.NopCloser(io.TeeReader(r.Body, &body)) + if err := r.Write(&b); err != nil { + return nil, fmt.Errorf("Failed to write response: %v", err) + } + + li.Logger.Printf(requestFormat, b.String()) + + r.Body = ioutil.NopCloser(&body) + return p.Prepare(r) + }) + } +} + +// ByInspecting returns a RespondDecorator that emits the http.Response to the supplied logger. The +// body is restored after being emitted. +// +// Note: Since it reads the entire Body, this decorator should not be used where body streaming is +// important. It is best used to trace JSON or similar body values. +func (li LoggingInspector) ByInspecting() RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + var body, b bytes.Buffer + defer resp.Body.Close() + resp.Body = ioutil.NopCloser(io.TeeReader(resp.Body, &body)) + if err := resp.Write(&b); err != nil { + return fmt.Errorf("Failed to write response: %v", err) + } + + li.Logger.Printf(responseFormat, b.String()) + + resp.Body = ioutil.NopCloser(&body) + return r.Respond(resp) + }) + } +} + +// Client is the base for autorest generated clients. It provides default, "do nothing" +// implementations of an Authorizer, RequestInspector, and ResponseInspector. It also returns the +// standard, undecorated http.Client as a default Sender. +// +// Generated clients should also use Error (see NewError and NewErrorWithError) for errors and +// return responses that compose with Response. 
+// +// Most customization of generated clients is best achieved by supplying a custom Authorizer, custom +// RequestInspector, and / or custom ResponseInspector. Users may log requests, implement circuit +// breakers (see https://msdn.microsoft.com/en-us/library/dn589784.aspx) or otherwise influence +// sending the request by providing a decorated Sender. +type Client struct { + Authorizer Authorizer + Sender Sender + RequestInspector PrepareDecorator + ResponseInspector RespondDecorator + + // PollingDelay sets the polling frequency used in absence of a Retry-After HTTP header + PollingDelay time.Duration + + // PollingDuration sets the maximum polling time after which an error is returned. + // Setting this to zero will use the provided context to control the duration. + PollingDuration time.Duration + + // RetryAttempts sets the default number of retry attempts for client. + RetryAttempts int + + // RetryDuration sets the delay duration for retries. + RetryDuration time.Duration + + // UserAgent, if not empty, will be set as the HTTP User-Agent header on all requests sent + // through the Do method. + UserAgent string + + Jar http.CookieJar + + // Set to true to skip attempted registration of resource providers (false by default). + SkipResourceProviderRegistration bool +} + +// NewClientWithUserAgent returns an instance of a Client with the UserAgent set to the passed +// string. +func NewClientWithUserAgent(ua string) Client { + return newClient(ua, tls.RenegotiateNever) +} + +// ClientOptions contains various Client configuration options. +type ClientOptions struct { + // UserAgent is an optional user-agent string to append to the default user agent. + UserAgent string + + // Renegotiation is an optional setting to control client-side TLS renegotiation. + Renegotiation tls.RenegotiationSupport +} + +// NewClientWithOptions returns an instance of a Client with the specified values. 
+func NewClientWithOptions(options ClientOptions) Client { + return newClient(options.UserAgent, options.Renegotiation) +} + +func newClient(ua string, renegotiation tls.RenegotiationSupport) Client { + c := Client{ + PollingDelay: DefaultPollingDelay, + PollingDuration: DefaultPollingDuration, + RetryAttempts: DefaultRetryAttempts, + RetryDuration: DefaultRetryDuration, + UserAgent: UserAgent(), + } + c.Sender = c.sender(renegotiation) + c.AddToUserAgent(ua) + return c +} + +// AddToUserAgent adds an extension to the current user agent +func (c *Client) AddToUserAgent(extension string) error { + if extension != "" { + c.UserAgent = fmt.Sprintf("%s %s", c.UserAgent, extension) + return nil + } + return fmt.Errorf("Extension was empty, User Agent stayed as %s", c.UserAgent) +} + +// Do implements the Sender interface by invoking the active Sender after applying authorization. +// If Sender is not set, it uses a new instance of http.Client. In both cases it will, if UserAgent +// is set, apply set the User-Agent header. +func (c Client) Do(r *http.Request) (*http.Response, error) { + if r.UserAgent() == "" { + r, _ = Prepare(r, + WithUserAgent(c.UserAgent)) + } + // NOTE: c.WithInspection() must be last in the list so that it can inspect all preceding operations + r, err := Prepare(r, + c.WithAuthorization(), + c.WithInspection()) + if err != nil { + var resp *http.Response + if detErr, ok := err.(DetailedError); ok { + // if the authorization failed (e.g. invalid credentials) there will + // be a response associated with the error, be sure to return it. 
+			resp = detErr.Response
+		}
+		return resp, NewErrorWithError(err, "autorest/Client", "Do", nil, "Preparing request failed")
+	}
+	logger.Instance.WriteRequest(r, logger.Filter{
+		Header: func(k string, v []string) (bool, []string) {
+			// remove the auth token from the log
+			if strings.EqualFold(k, "Authorization") || strings.EqualFold(k, "Ocp-Apim-Subscription-Key") {
+				v = []string{"**REDACTED**"}
+			}
+			return true, v
+		},
+	})
+	resp, err := SendWithSender(c.sender(tls.RenegotiateNever), r)
+	logger.Instance.WriteResponse(resp, logger.Filter{})
+	Respond(resp, c.ByInspecting())
+	return resp, err
+}
+
+// sender returns the Sender to which to send requests.
+func (c Client) sender(renengotiation tls.RenegotiationSupport) Sender {
+	if c.Sender == nil {
+		return sender(renengotiation)
+	}
+	return c.Sender
+}
+
+// WithAuthorization is a convenience method that returns the WithAuthorization PrepareDecorator
+// from the current Authorizer. If no Authorizer is set, it uses the NullAuthorizer.
+func (c Client) WithAuthorization() PrepareDecorator {
+	return c.authorizer().WithAuthorization()
+}
+
+// authorizer returns the Authorizer to use.
+func (c Client) authorizer() Authorizer {
+	if c.Authorizer == nil {
+		return NullAuthorizer{}
+	}
+	return c.Authorizer
+}
+
+// WithInspection is a convenience method that passes the request to the supplied RequestInspector,
+// if present, or returns the WithNothing PrepareDecorator otherwise.
+func (c Client) WithInspection() PrepareDecorator {
+	if c.RequestInspector == nil {
+		return WithNothing()
+	}
+	return c.RequestInspector
+}
+
+// ByInspecting is a convenience method that passes the response to the supplied ResponseInspector,
+// if present, or returns the ByIgnoring RespondDecorator otherwise. 
+func (c Client) ByInspecting() RespondDecorator { + if c.ResponseInspector == nil { + return ByIgnoring() + } + return c.ResponseInspector +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/LICENSE b/vendor/github.com/Azure/go-autorest/autorest/date/LICENSE new file mode 100644 index 00000000000..b9d6a27ea92 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/date.go b/vendor/github.com/Azure/go-autorest/autorest/date/date.go new file mode 100644 index 00000000000..c4571065685 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/date.go @@ -0,0 +1,96 @@ +/* +Package date provides time.Time derivatives that conform to the Swagger.io (https://swagger.io/) +defined date formats: Date and DateTime. Both types may, in most cases, be used in lieu of +time.Time types. And both convert to time.Time through a ToTime method. +*/ +package date + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "fmt" + "time" +) + +const ( + fullDate = "2006-01-02" + fullDateJSON = `"2006-01-02"` + dateFormat = "%04d-%02d-%02d" + jsonFormat = `"%04d-%02d-%02d"` +) + +// Date defines a type similar to time.Time but assumes a layout of RFC3339 full-date (i.e., +// 2006-01-02). +type Date struct { + time.Time +} + +// ParseDate create a new Date from the passed string. +func ParseDate(date string) (d Date, err error) { + return parseDate(date, fullDate) +} + +func parseDate(date string, format string) (Date, error) { + d, err := time.Parse(format, date) + return Date{Time: d}, err +} + +// MarshalBinary preserves the Date as a byte array conforming to RFC3339 full-date (i.e., +// 2006-01-02). 
+func (d Date) MarshalBinary() ([]byte, error) { + return d.MarshalText() +} + +// UnmarshalBinary reconstitutes a Date saved as a byte array conforming to RFC3339 full-date (i.e., +// 2006-01-02). +func (d *Date) UnmarshalBinary(data []byte) error { + return d.UnmarshalText(data) +} + +// MarshalJSON preserves the Date as a JSON string conforming to RFC3339 full-date (i.e., +// 2006-01-02). +func (d Date) MarshalJSON() (json []byte, err error) { + return []byte(fmt.Sprintf(jsonFormat, d.Year(), d.Month(), d.Day())), nil +} + +// UnmarshalJSON reconstitutes the Date from a JSON string conforming to RFC3339 full-date (i.e., +// 2006-01-02). +func (d *Date) UnmarshalJSON(data []byte) (err error) { + d.Time, err = time.Parse(fullDateJSON, string(data)) + return err +} + +// MarshalText preserves the Date as a byte array conforming to RFC3339 full-date (i.e., +// 2006-01-02). +func (d Date) MarshalText() (text []byte, err error) { + return []byte(fmt.Sprintf(dateFormat, d.Year(), d.Month(), d.Day())), nil +} + +// UnmarshalText reconstitutes a Date saved as a byte array conforming to RFC3339 full-date (i.e., +// 2006-01-02). +func (d *Date) UnmarshalText(data []byte) (err error) { + d.Time, err = time.Parse(fullDate, string(data)) + return err +} + +// String returns the Date formatted as an RFC3339 full-date string (i.e., 2006-01-02). 
+func (d Date) String() string { + return fmt.Sprintf(dateFormat, d.Year(), d.Month(), d.Day()) +} + +// ToTime returns a Date as a time.Time +func (d Date) ToTime() time.Time { + return d.Time +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/go.mod b/vendor/github.com/Azure/go-autorest/autorest/date/go.mod new file mode 100644 index 00000000000..3adc4804c3d --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/go.mod @@ -0,0 +1,5 @@ +module github.com/Azure/go-autorest/autorest/date + +go 1.12 + +require github.com/Azure/go-autorest/autorest v0.9.0 diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/go.sum b/vendor/github.com/Azure/go-autorest/autorest/date/go.sum new file mode 100644 index 00000000000..9e2ee7a9484 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/go.sum @@ -0,0 +1,16 @@ +github.com/Azure/go-autorest/autorest v0.9.0 h1:MRvx8gncNaXJqOoLmhNjUAKh33JJF8LyxPhomEtOsjs= +github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest/adal v0.5.0 h1:q2gDruN08/guU9vAjuPWff0+QIrpH6ediguzdAzXAUU= +github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= +github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.2.0 h1:Ww5g4zThfD/6cLb4z6xxgeyDa7QDkizMkJKe0ysZXp0= +github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY= +github.com/Azure/go-autorest/logger v0.1.0/go.mod 
h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= +github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k= +github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= +github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/date/go_mod_tidy_hack.go new file mode 100644 index 00000000000..55adf930f4a --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/go_mod_tidy_hack.go @@ -0,0 +1,24 @@ +// +build modhack + +package date + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file, and the github.com/Azure/go-autorest/autorest import, won't actually become part of +// the resultant binary. + +// Necessary for safely adding multi-module repo. 
+// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository +import _ "github.com/Azure/go-autorest/autorest" diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/time.go b/vendor/github.com/Azure/go-autorest/autorest/date/time.go new file mode 100644 index 00000000000..b453fad0491 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/time.go @@ -0,0 +1,103 @@ +package date + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "regexp" + "time" +) + +// Azure reports time in UTC but it doesn't include the 'Z' time zone suffix in some cases. +const ( + azureUtcFormatJSON = `"2006-01-02T15:04:05.999999999"` + azureUtcFormat = "2006-01-02T15:04:05.999999999" + rfc3339JSON = `"` + time.RFC3339Nano + `"` + rfc3339 = time.RFC3339Nano + tzOffsetRegex = `(Z|z|\+|-)(\d+:\d+)*"*$` +) + +// Time defines a type similar to time.Time but assumes a layout of RFC3339 date-time (i.e., +// 2006-01-02T15:04:05Z). +type Time struct { + time.Time +} + +// MarshalBinary preserves the Time as a byte array conforming to RFC3339 date-time (i.e., +// 2006-01-02T15:04:05Z). +func (t Time) MarshalBinary() ([]byte, error) { + return t.Time.MarshalText() +} + +// UnmarshalBinary reconstitutes a Time saved as a byte array conforming to RFC3339 date-time +// (i.e., 2006-01-02T15:04:05Z). 
+func (t *Time) UnmarshalBinary(data []byte) error { + return t.UnmarshalText(data) +} + +// MarshalJSON preserves the Time as a JSON string conforming to RFC3339 date-time (i.e., +// 2006-01-02T15:04:05Z). +func (t Time) MarshalJSON() (json []byte, err error) { + return t.Time.MarshalJSON() +} + +// UnmarshalJSON reconstitutes the Time from a JSON string conforming to RFC3339 date-time +// (i.e., 2006-01-02T15:04:05Z). +func (t *Time) UnmarshalJSON(data []byte) (err error) { + timeFormat := azureUtcFormatJSON + match, err := regexp.Match(tzOffsetRegex, data) + if err != nil { + return err + } else if match { + timeFormat = rfc3339JSON + } + t.Time, err = ParseTime(timeFormat, string(data)) + return err +} + +// MarshalText preserves the Time as a byte array conforming to RFC3339 date-time (i.e., +// 2006-01-02T15:04:05Z). +func (t Time) MarshalText() (text []byte, err error) { + return t.Time.MarshalText() +} + +// UnmarshalText reconstitutes a Time saved as a byte array conforming to RFC3339 date-time +// (i.e., 2006-01-02T15:04:05Z). +func (t *Time) UnmarshalText(data []byte) (err error) { + timeFormat := azureUtcFormat + match, err := regexp.Match(tzOffsetRegex, data) + if err != nil { + return err + } else if match { + timeFormat = rfc3339 + } + t.Time, err = ParseTime(timeFormat, string(data)) + return err +} + +// String returns the Time formatted as an RFC3339 date-time string (i.e., +// 2006-01-02T15:04:05Z). +func (t Time) String() string { + // Note: time.Time.String does not return an RFC3339 compliant string, time.Time.MarshalText does. 
+ b, err := t.MarshalText() + if err != nil { + return "" + } + return string(b) +} + +// ToTime returns a Time as a time.Time +func (t Time) ToTime() time.Time { + return t.Time +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go b/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go new file mode 100644 index 00000000000..48fb39ba9b9 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go @@ -0,0 +1,100 @@ +package date + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "errors" + "time" +) + +const ( + rfc1123JSON = `"` + time.RFC1123 + `"` + rfc1123 = time.RFC1123 +) + +// TimeRFC1123 defines a type similar to time.Time but assumes a layout of RFC1123 date-time (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). +type TimeRFC1123 struct { + time.Time +} + +// UnmarshalJSON reconstitutes the Time from a JSON string conforming to RFC1123 date-time +// (i.e., Mon, 02 Jan 2006 15:04:05 MST). +func (t *TimeRFC1123) UnmarshalJSON(data []byte) (err error) { + t.Time, err = ParseTime(rfc1123JSON, string(data)) + if err != nil { + return err + } + return nil +} + +// MarshalJSON preserves the Time as a JSON string conforming to RFC1123 date-time (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). 
+func (t TimeRFC1123) MarshalJSON() ([]byte, error) { + if y := t.Year(); y < 0 || y >= 10000 { + return nil, errors.New("Time.MarshalJSON: year outside of range [0,9999]") + } + b := []byte(t.Format(rfc1123JSON)) + return b, nil +} + +// MarshalText preserves the Time as a byte array conforming to RFC1123 date-time (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). +func (t TimeRFC1123) MarshalText() ([]byte, error) { + if y := t.Year(); y < 0 || y >= 10000 { + return nil, errors.New("Time.MarshalText: year outside of range [0,9999]") + } + + b := []byte(t.Format(rfc1123)) + return b, nil +} + +// UnmarshalText reconstitutes a Time saved as a byte array conforming to RFC1123 date-time +// (i.e., Mon, 02 Jan 2006 15:04:05 MST). +func (t *TimeRFC1123) UnmarshalText(data []byte) (err error) { + t.Time, err = ParseTime(rfc1123, string(data)) + if err != nil { + return err + } + return nil +} + +// MarshalBinary preserves the Time as a byte array conforming to RFC1123 date-time (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). +func (t TimeRFC1123) MarshalBinary() ([]byte, error) { + return t.MarshalText() +} + +// UnmarshalBinary reconstitutes a Time saved as a byte array conforming to RFC1123 date-time +// (i.e., Mon, 02 Jan 2006 15:04:05 MST). +func (t *TimeRFC1123) UnmarshalBinary(data []byte) error { + return t.UnmarshalText(data) +} + +// ToTime returns a Time as a time.Time +func (t TimeRFC1123) ToTime() time.Time { + return t.Time +} + +// String returns the Time formatted as an RFC1123 date-time string (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). +func (t TimeRFC1123) String() string { + // Note: time.Time.String does not return an RFC1123 compliant string, time.Time.MarshalText does. 
+ b, err := t.MarshalText() + if err != nil { + return "" + } + return string(b) +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go b/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go new file mode 100644 index 00000000000..7073959b2a9 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go @@ -0,0 +1,123 @@ +package date + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "encoding/binary" + "encoding/json" + "time" +) + +// unixEpoch is the moment in time that should be treated as timestamp 0. +var unixEpoch = time.Date(1970, time.January, 1, 0, 0, 0, 0, time.UTC) + +// UnixTime marshals and unmarshals a time that is represented as the number +// of seconds (ignoring skip-seconds) since the Unix Epoch. +type UnixTime time.Time + +// Duration returns the time as a Duration since the UnixEpoch. +func (t UnixTime) Duration() time.Duration { + return time.Time(t).Sub(unixEpoch) +} + +// NewUnixTimeFromSeconds creates a UnixTime as a number of seconds from the UnixEpoch. +func NewUnixTimeFromSeconds(seconds float64) UnixTime { + return NewUnixTimeFromDuration(time.Duration(seconds * float64(time.Second))) +} + +// NewUnixTimeFromNanoseconds creates a UnixTime as a number of nanoseconds from the UnixEpoch. 
+func NewUnixTimeFromNanoseconds(nanoseconds int64) UnixTime { + return NewUnixTimeFromDuration(time.Duration(nanoseconds)) +} + +// NewUnixTimeFromDuration creates a UnixTime as a duration of time since the UnixEpoch. +func NewUnixTimeFromDuration(dur time.Duration) UnixTime { + return UnixTime(unixEpoch.Add(dur)) +} + +// UnixEpoch retreives the moment considered the Unix Epoch. I.e. The time represented by '0' +func UnixEpoch() time.Time { + return unixEpoch +} + +// MarshalJSON preserves the UnixTime as a JSON number conforming to Unix Timestamp requirements. +// (i.e. the number of seconds since midnight January 1st, 1970 not considering leap seconds.) +func (t UnixTime) MarshalJSON() ([]byte, error) { + buffer := &bytes.Buffer{} + enc := json.NewEncoder(buffer) + err := enc.Encode(float64(time.Time(t).UnixNano()) / 1e9) + if err != nil { + return nil, err + } + return buffer.Bytes(), nil +} + +// UnmarshalJSON reconstitures a UnixTime saved as a JSON number of the number of seconds since +// midnight January 1st, 1970. +func (t *UnixTime) UnmarshalJSON(text []byte) error { + dec := json.NewDecoder(bytes.NewReader(text)) + + var secondsSinceEpoch float64 + if err := dec.Decode(&secondsSinceEpoch); err != nil { + return err + } + + *t = NewUnixTimeFromSeconds(secondsSinceEpoch) + + return nil +} + +// MarshalText stores the number of seconds since the Unix Epoch as a textual floating point number. +func (t UnixTime) MarshalText() ([]byte, error) { + cast := time.Time(t) + return cast.MarshalText() +} + +// UnmarshalText populates a UnixTime with a value stored textually as a floating point number of seconds since the Unix Epoch. +func (t *UnixTime) UnmarshalText(raw []byte) error { + var unmarshaled time.Time + + if err := unmarshaled.UnmarshalText(raw); err != nil { + return err + } + + *t = UnixTime(unmarshaled) + return nil +} + +// MarshalBinary converts a UnixTime into a binary.LittleEndian float64 of nanoseconds since the epoch. 
+func (t UnixTime) MarshalBinary() ([]byte, error) { + buf := &bytes.Buffer{} + + payload := int64(t.Duration()) + + if err := binary.Write(buf, binary.LittleEndian, &payload); err != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +// UnmarshalBinary converts a from a binary.LittleEndian float64 of nanoseconds since the epoch into a UnixTime. +func (t *UnixTime) UnmarshalBinary(raw []byte) error { + var nanosecondsSinceEpoch int64 + + if err := binary.Read(bytes.NewReader(raw), binary.LittleEndian, &nanosecondsSinceEpoch); err != nil { + return err + } + *t = NewUnixTimeFromNanoseconds(nanosecondsSinceEpoch) + return nil +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/utility.go b/vendor/github.com/Azure/go-autorest/autorest/date/utility.go new file mode 100644 index 00000000000..12addf0ebb4 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/utility.go @@ -0,0 +1,25 @@ +package date + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "strings" + "time" +) + +// ParseTime to parse Time string to specified format. 
+func ParseTime(format string, t string) (d time.Time, err error) { + return time.Parse(format, strings.ToUpper(t)) +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/error.go b/vendor/github.com/Azure/go-autorest/autorest/error.go new file mode 100644 index 00000000000..f724f33327e --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/error.go @@ -0,0 +1,98 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "fmt" + "net/http" +) + +const ( + // UndefinedStatusCode is used when HTTP status code is not available for an error. + UndefinedStatusCode = 0 +) + +// DetailedError encloses a error with details of the package, method, and associated HTTP +// status code (if any). +type DetailedError struct { + Original error + + // PackageType is the package type of the object emitting the error. For types, the value + // matches that produced the the '%T' format specifier of the fmt package. For other elements, + // such as functions, it is just the package name (e.g., "autorest"). + PackageType string + + // Method is the name of the method raising the error. + Method string + + // StatusCode is the HTTP Response StatusCode (if non-zero) that led to the error. + StatusCode interface{} + + // Message is the error message. 
+ Message string + + // Service Error is the response body of failed API in bytes + ServiceError []byte + + // Response is the response object that was returned during failure if applicable. + Response *http.Response +} + +// NewError creates a new Error conforming object from the passed packageType, method, and +// message. message is treated as a format string to which the optional args apply. +func NewError(packageType string, method string, message string, args ...interface{}) DetailedError { + return NewErrorWithError(nil, packageType, method, nil, message, args...) +} + +// NewErrorWithResponse creates a new Error conforming object from the passed +// packageType, method, statusCode of the given resp (UndefinedStatusCode if +// resp is nil), and message. message is treated as a format string to which the +// optional args apply. +func NewErrorWithResponse(packageType string, method string, resp *http.Response, message string, args ...interface{}) DetailedError { + return NewErrorWithError(nil, packageType, method, resp, message, args...) +} + +// NewErrorWithError creates a new Error conforming object from the +// passed packageType, method, statusCode of the given resp (UndefinedStatusCode +// if resp is nil), message, and original error. message is treated as a format +// string to which the optional args apply. +func NewErrorWithError(original error, packageType string, method string, resp *http.Response, message string, args ...interface{}) DetailedError { + if v, ok := original.(DetailedError); ok { + return v + } + + statusCode := UndefinedStatusCode + if resp != nil { + statusCode = resp.StatusCode + } + + return DetailedError{ + Original: original, + PackageType: packageType, + Method: method, + StatusCode: statusCode, + Message: fmt.Sprintf(message, args...), + Response: resp, + } +} + +// Error returns a formatted containing all available details (i.e., PackageType, Method, +// StatusCode, Message, and original error (if any)). 
+func (e DetailedError) Error() string { + if e.Original == nil { + return fmt.Sprintf("%s#%s: %s: StatusCode=%d", e.PackageType, e.Method, e.Message, e.StatusCode) + } + return fmt.Sprintf("%s#%s: %s: StatusCode=%d -- Original Error: %v", e.PackageType, e.Method, e.Message, e.StatusCode, e.Original) +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/go.mod b/vendor/github.com/Azure/go-autorest/autorest/go.mod new file mode 100644 index 00000000000..6f1fcd4a4db --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/go.mod @@ -0,0 +1,11 @@ +module github.com/Azure/go-autorest/autorest + +go 1.12 + +require ( + github.com/Azure/go-autorest/autorest/adal v0.8.0 + github.com/Azure/go-autorest/autorest/mocks v0.3.0 + github.com/Azure/go-autorest/logger v0.1.0 + github.com/Azure/go-autorest/tracing v0.5.0 + golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 +) diff --git a/vendor/github.com/Azure/go-autorest/autorest/go.sum b/vendor/github.com/Azure/go-autorest/autorest/go.sum new file mode 100644 index 00000000000..e0d94da0a25 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/go.sum @@ -0,0 +1,30 @@ +github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest/adal v0.5.0 h1:q2gDruN08/guU9vAjuPWff0+QIrpH6ediguzdAzXAUU= +github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= +github.com/Azure/go-autorest/autorest/adal v0.8.0 h1:CxTzQrySOxDnKpLjFJeZAS5Qrv/qFPkgLjx5bOAi//I= +github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= +github.com/Azure/go-autorest/autorest/date v0.1.0 h1:YGrhWfrgtFs84+h0o46rJrlmsZtyZRg470CqAXTZaGM= +github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod 
h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/date v0.2.0 h1:yW+Zlqf26583pE43KhfnhFcdmSWlm5Ew6bxipnr/tbM= +github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= +github.com/Azure/go-autorest/autorest/mocks v0.1.0 h1:Kx+AUU2Te+A3JIyYn6Dfs+cFgx5XorQKuIXrZGoq/SI= +github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.2.0 h1:Ww5g4zThfD/6cLb4z6xxgeyDa7QDkizMkJKe0ysZXp0= +github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.3.0 h1:qJumjCaCudz+OcqE9/XtEPfvtOjOmKaui4EOpFI6zZc= +github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= +github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY= +github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= +github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k= +github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= +github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 h1:ULYEB3JvPRE/IfO+9uO7vKV/xzVTO7XPAwm8xbf4w2g= +golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod 
h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/vendor/github.com/Azure/go-autorest/autorest/preparer.go b/vendor/github.com/Azure/go-autorest/autorest/preparer.go new file mode 100644 index 00000000000..6e8ed64eba1 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/preparer.go @@ -0,0 +1,550 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +import ( + "bytes" + "context" + "encoding/json" + "encoding/xml" + "fmt" + "io" + "io/ioutil" + "mime/multipart" + "net/http" + "net/url" + "strings" +) + +const ( + mimeTypeJSON = "application/json" + mimeTypeOctetStream = "application/octet-stream" + mimeTypeFormPost = "application/x-www-form-urlencoded" + + headerAuthorization = "Authorization" + headerAuxAuthorization = "x-ms-authorization-auxiliary" + headerContentType = "Content-Type" + headerUserAgent = "User-Agent" +) + +// used as a key type in context.WithValue() +type ctxPrepareDecorators struct{} + +// WithPrepareDecorators adds the specified PrepareDecorators to the provided context. +// If no PrepareDecorators are provided the context is unchanged. +func WithPrepareDecorators(ctx context.Context, prepareDecorator []PrepareDecorator) context.Context { + if len(prepareDecorator) == 0 { + return ctx + } + return context.WithValue(ctx, ctxPrepareDecorators{}, prepareDecorator) +} + +// GetPrepareDecorators returns the PrepareDecorators in the provided context or the provided default PrepareDecorators. +func GetPrepareDecorators(ctx context.Context, defaultPrepareDecorators ...PrepareDecorator) []PrepareDecorator { + inCtx := ctx.Value(ctxPrepareDecorators{}) + if pd, ok := inCtx.([]PrepareDecorator); ok { + return pd + } + return defaultPrepareDecorators +} + +// Preparer is the interface that wraps the Prepare method. +// +// Prepare accepts and possibly modifies an http.Request (e.g., adding Headers). Implementations +// must ensure to not share or hold per-invocation state since Preparers may be shared and re-used. +type Preparer interface { + Prepare(*http.Request) (*http.Request, error) +} + +// PreparerFunc is a method that implements the Preparer interface. +type PreparerFunc func(*http.Request) (*http.Request, error) + +// Prepare implements the Preparer interface on PreparerFunc. 
+func (pf PreparerFunc) Prepare(r *http.Request) (*http.Request, error) { + return pf(r) +} + +// PrepareDecorator takes and possibly decorates, by wrapping, a Preparer. Decorators may affect the +// http.Request and pass it along or, first, pass the http.Request along then affect the result. +type PrepareDecorator func(Preparer) Preparer + +// CreatePreparer creates, decorates, and returns a Preparer. +// Without decorators, the returned Preparer returns the passed http.Request unmodified. +// Preparers are safe to share and re-use. +func CreatePreparer(decorators ...PrepareDecorator) Preparer { + return DecoratePreparer( + Preparer(PreparerFunc(func(r *http.Request) (*http.Request, error) { return r, nil })), + decorators...) +} + +// DecoratePreparer accepts a Preparer and a, possibly empty, set of PrepareDecorators, which it +// applies to the Preparer. Decorators are applied in the order received, but their affect upon the +// request depends on whether they are a pre-decorator (change the http.Request and then pass it +// along) or a post-decorator (pass the http.Request along and alter it on return). +func DecoratePreparer(p Preparer, decorators ...PrepareDecorator) Preparer { + for _, decorate := range decorators { + p = decorate(p) + } + return p +} + +// Prepare accepts an http.Request and a, possibly empty, set of PrepareDecorators. +// It creates a Preparer from the decorators which it then applies to the passed http.Request. +func Prepare(r *http.Request, decorators ...PrepareDecorator) (*http.Request, error) { + if r == nil { + return nil, NewError("autorest", "Prepare", "Invoked without an http.Request") + } + return CreatePreparer(decorators...).Prepare(r) +} + +// WithNothing returns a "do nothing" PrepareDecorator that makes no changes to the passed +// http.Request. 
+func WithNothing() PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + return p.Prepare(r) + }) + } +} + +// WithHeader returns a PrepareDecorator that sets the specified HTTP header of the http.Request to +// the passed value. It canonicalizes the passed header name (via http.CanonicalHeaderKey) before +// adding the header. +func WithHeader(header string, value string) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set(http.CanonicalHeaderKey(header), value) + } + return r, err + }) + } +} + +// WithHeaders returns a PrepareDecorator that sets the specified HTTP headers of the http.Request to +// the passed value. It canonicalizes the passed headers name (via http.CanonicalHeaderKey) before +// adding them. +func WithHeaders(headers map[string]interface{}) PrepareDecorator { + h := ensureValueStrings(headers) + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + if r.Header == nil { + r.Header = make(http.Header) + } + + for name, value := range h { + r.Header.Set(http.CanonicalHeaderKey(name), value) + } + } + return r, err + }) + } +} + +// WithBearerAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose +// value is "Bearer " followed by the supplied token. +func WithBearerAuthorization(token string) PrepareDecorator { + return WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", token)) +} + +// AsContentType returns a PrepareDecorator that adds an HTTP Content-Type header whose value +// is the passed contentType. 
+func AsContentType(contentType string) PrepareDecorator { + return WithHeader(headerContentType, contentType) +} + +// WithUserAgent returns a PrepareDecorator that adds an HTTP User-Agent header whose value is the +// passed string. +func WithUserAgent(ua string) PrepareDecorator { + return WithHeader(headerUserAgent, ua) +} + +// AsFormURLEncoded returns a PrepareDecorator that adds an HTTP Content-Type header whose value is +// "application/x-www-form-urlencoded". +func AsFormURLEncoded() PrepareDecorator { + return AsContentType(mimeTypeFormPost) +} + +// AsJSON returns a PrepareDecorator that adds an HTTP Content-Type header whose value is +// "application/json". +func AsJSON() PrepareDecorator { + return AsContentType(mimeTypeJSON) +} + +// AsOctetStream returns a PrepareDecorator that adds the "application/octet-stream" Content-Type header. +func AsOctetStream() PrepareDecorator { + return AsContentType(mimeTypeOctetStream) +} + +// WithMethod returns a PrepareDecorator that sets the HTTP method of the passed request. The +// decorator does not validate that the passed method string is a known HTTP method. +func WithMethod(method string) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r.Method = method + return p.Prepare(r) + }) + } +} + +// AsDelete returns a PrepareDecorator that sets the HTTP method to DELETE. +func AsDelete() PrepareDecorator { return WithMethod("DELETE") } + +// AsGet returns a PrepareDecorator that sets the HTTP method to GET. +func AsGet() PrepareDecorator { return WithMethod("GET") } + +// AsHead returns a PrepareDecorator that sets the HTTP method to HEAD. +func AsHead() PrepareDecorator { return WithMethod("HEAD") } + +// AsMerge returns a PrepareDecorator that sets the HTTP method to MERGE. +func AsMerge() PrepareDecorator { return WithMethod("MERGE") } + +// AsOptions returns a PrepareDecorator that sets the HTTP method to OPTIONS. 
+func AsOptions() PrepareDecorator { return WithMethod("OPTIONS") } + +// AsPatch returns a PrepareDecorator that sets the HTTP method to PATCH. +func AsPatch() PrepareDecorator { return WithMethod("PATCH") } + +// AsPost returns a PrepareDecorator that sets the HTTP method to POST. +func AsPost() PrepareDecorator { return WithMethod("POST") } + +// AsPut returns a PrepareDecorator that sets the HTTP method to PUT. +func AsPut() PrepareDecorator { return WithMethod("PUT") } + +// WithBaseURL returns a PrepareDecorator that populates the http.Request with a url.URL constructed +// from the supplied baseUrl. +func WithBaseURL(baseURL string) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + var u *url.URL + if u, err = url.Parse(baseURL); err != nil { + return r, err + } + if u.Scheme == "" { + err = fmt.Errorf("autorest: No scheme detected in URL %s", baseURL) + } + if err == nil { + r.URL = u + } + } + return r, err + }) + } +} + +// WithBytes returns a PrepareDecorator that takes a list of bytes +// which passes the bytes directly to the body +func WithBytes(input *[]byte) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + if input == nil { + return r, fmt.Errorf("Input Bytes was nil") + } + + r.ContentLength = int64(len(*input)) + r.Body = ioutil.NopCloser(bytes.NewReader(*input)) + } + return r, err + }) + } +} + +// WithCustomBaseURL returns a PrepareDecorator that replaces brace-enclosed keys within the +// request base URL (i.e., http.Request.URL) with the corresponding values from the passed map. 
+func WithCustomBaseURL(baseURL string, urlParameters map[string]interface{}) PrepareDecorator { + parameters := ensureValueStrings(urlParameters) + for key, value := range parameters { + baseURL = strings.Replace(baseURL, "{"+key+"}", value, -1) + } + return WithBaseURL(baseURL) +} + +// WithFormData returns a PrepareDecorator that "URL encodes" (e.g., bar=baz&foo=quux) into the +// http.Request body. +func WithFormData(v url.Values) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + s := v.Encode() + + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set(http.CanonicalHeaderKey(headerContentType), mimeTypeFormPost) + r.ContentLength = int64(len(s)) + r.Body = ioutil.NopCloser(strings.NewReader(s)) + } + return r, err + }) + } +} + +// WithMultiPartFormData returns a PrepareDecorator that "URL encodes" (e.g., bar=baz&foo=quux) form parameters +// into the http.Request body.
+func WithMultiPartFormData(formDataParameters map[string]interface{}) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + var body bytes.Buffer + writer := multipart.NewWriter(&body) + for key, value := range formDataParameters { + if rc, ok := value.(io.ReadCloser); ok { + var fd io.Writer + if fd, err = writer.CreateFormFile(key, key); err != nil { + return r, err + } + if _, err = io.Copy(fd, rc); err != nil { + return r, err + } + } else { + if err = writer.WriteField(key, ensureValueString(value)); err != nil { + return r, err + } + } + } + if err = writer.Close(); err != nil { + return r, err + } + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set(http.CanonicalHeaderKey(headerContentType), writer.FormDataContentType()) + r.Body = ioutil.NopCloser(bytes.NewReader(body.Bytes())) + r.ContentLength = int64(body.Len()) + return r, err + } + return r, err + }) + } +} + +// WithFile returns a PrepareDecorator that sends file in request body. +func WithFile(f io.ReadCloser) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + b, err := ioutil.ReadAll(f) + if err != nil { + return r, err + } + r.Body = ioutil.NopCloser(bytes.NewReader(b)) + r.ContentLength = int64(len(b)) + } + return r, err + }) + } +} + +// WithBool returns a PrepareDecorator that encodes the passed bool into the body of the request +// and sets the Content-Length header. +func WithBool(v bool) PrepareDecorator { + return WithString(fmt.Sprintf("%v", v)) +} + +// WithFloat32 returns a PrepareDecorator that encodes the passed float32 into the body of the +// request and sets the Content-Length header. 
+func WithFloat32(v float32) PrepareDecorator { + return WithString(fmt.Sprintf("%v", v)) +} + +// WithFloat64 returns a PrepareDecorator that encodes the passed float64 into the body of the +// request and sets the Content-Length header. +func WithFloat64(v float64) PrepareDecorator { + return WithString(fmt.Sprintf("%v", v)) +} + +// WithInt32 returns a PrepareDecorator that encodes the passed int32 into the body of the request +// and sets the Content-Length header. +func WithInt32(v int32) PrepareDecorator { + return WithString(fmt.Sprintf("%v", v)) +} + +// WithInt64 returns a PrepareDecorator that encodes the passed int64 into the body of the request +// and sets the Content-Length header. +func WithInt64(v int64) PrepareDecorator { + return WithString(fmt.Sprintf("%v", v)) +} + +// WithString returns a PrepareDecorator that encodes the passed string into the body of the request +// and sets the Content-Length header. +func WithString(v string) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + r.ContentLength = int64(len(v)) + r.Body = ioutil.NopCloser(strings.NewReader(v)) + } + return r, err + }) + } +} + +// WithJSON returns a PrepareDecorator that encodes the data passed as JSON into the body of the +// request and sets the Content-Length header. +func WithJSON(v interface{}) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + b, err := json.Marshal(v) + if err == nil { + r.ContentLength = int64(len(b)) + r.Body = ioutil.NopCloser(bytes.NewReader(b)) + } + } + return r, err + }) + } +} + +// WithXML returns a PrepareDecorator that encodes the data passed as XML into the body of the +// request and sets the Content-Length header. 
+func WithXML(v interface{}) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + b, err := xml.Marshal(v) + if err == nil { + // we have to tack on an XML header + withHeader := xml.Header + string(b) + bytesWithHeader := []byte(withHeader) + + r.ContentLength = int64(len(bytesWithHeader)) + r.Body = ioutil.NopCloser(bytes.NewReader(bytesWithHeader)) + } + } + return r, err + }) + } +} + +// WithPath returns a PrepareDecorator that adds the supplied path to the request URL. If the path +// is absolute (that is, it begins with a "/"), it replaces the existing path. +func WithPath(path string) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + if r.URL == nil { + return r, NewError("autorest", "WithPath", "Invoked with a nil URL") + } + if r.URL, err = parseURL(r.URL, path); err != nil { + return r, err + } + } + return r, err + }) + } +} + +// WithEscapedPathParameters returns a PrepareDecorator that replaces brace-enclosed keys within the +// request path (i.e., http.Request.URL.Path) with the corresponding values from the passed map. The +// values will be escaped (aka URL encoded) before insertion into the path. 
+func WithEscapedPathParameters(path string, pathParameters map[string]interface{}) PrepareDecorator { + parameters := escapeValueStrings(ensureValueStrings(pathParameters)) + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + if r.URL == nil { + return r, NewError("autorest", "WithEscapedPathParameters", "Invoked with a nil URL") + } + for key, value := range parameters { + path = strings.Replace(path, "{"+key+"}", value, -1) + } + if r.URL, err = parseURL(r.URL, path); err != nil { + return r, err + } + } + return r, err + }) + } +} + +// WithPathParameters returns a PrepareDecorator that replaces brace-enclosed keys within the +// request path (i.e., http.Request.URL.Path) with the corresponding values from the passed map. +func WithPathParameters(path string, pathParameters map[string]interface{}) PrepareDecorator { + parameters := ensureValueStrings(pathParameters) + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + if r.URL == nil { + return r, NewError("autorest", "WithPathParameters", "Invoked with a nil URL") + } + for key, value := range parameters { + path = strings.Replace(path, "{"+key+"}", value, -1) + } + + if r.URL, err = parseURL(r.URL, path); err != nil { + return r, err + } + } + return r, err + }) + } +} + +func parseURL(u *url.URL, path string) (*url.URL, error) { + p := strings.TrimRight(u.String(), "/") + if !strings.HasPrefix(path, "/") { + path = "/" + path + } + return url.Parse(p + path) +} + +// WithQueryParameters returns a PrepareDecorators that encodes and applies the query parameters +// given in the supplied map (i.e., key=value). 
+func WithQueryParameters(queryParameters map[string]interface{}) PrepareDecorator { + parameters := MapToValues(queryParameters) + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + if r.URL == nil { + return r, NewError("autorest", "WithQueryParameters", "Invoked with a nil URL") + } + v := r.URL.Query() + for key, value := range parameters { + for i := range value { + d, err := url.QueryUnescape(value[i]) + if err != nil { + return r, err + } + value[i] = d + } + v[key] = value + } + r.URL.RawQuery = v.Encode() + } + return r, err + }) + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/responder.go b/vendor/github.com/Azure/go-autorest/autorest/responder.go new file mode 100644 index 00000000000..349e1963a2c --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/responder.go @@ -0,0 +1,269 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "encoding/json" + "encoding/xml" + "fmt" + "io" + "io/ioutil" + "net/http" + "strings" +) + +// Responder is the interface that wraps the Respond method. +// +// Respond accepts and reacts to an http.Response. Implementations must ensure to not share or hold +// state since Responders may be shared and re-used. 
+type Responder interface { + Respond(*http.Response) error +} + +// ResponderFunc is a method that implements the Responder interface. +type ResponderFunc func(*http.Response) error + +// Respond implements the Responder interface on ResponderFunc. +func (rf ResponderFunc) Respond(r *http.Response) error { + return rf(r) +} + +// RespondDecorator takes and possibly decorates, by wrapping, a Responder. Decorators may react to +// the http.Response and pass it along or, first, pass the http.Response along then react. +type RespondDecorator func(Responder) Responder + +// CreateResponder creates, decorates, and returns a Responder. Without decorators, the returned +// Responder returns the passed http.Response unmodified. Responders may or may not be safe to share +// and re-used: It depends on the applied decorators. For example, a standard decorator that closes +// the response body is fine to share whereas a decorator that reads the body into a passed struct +// is not. +// +// To prevent memory leaks, ensure that at least one Responder closes the response body. +func CreateResponder(decorators ...RespondDecorator) Responder { + return DecorateResponder( + Responder(ResponderFunc(func(r *http.Response) error { return nil })), + decorators...) +} + +// DecorateResponder accepts a Responder and a, possibly empty, set of RespondDecorators, which it +// applies to the Responder. Decorators are applied in the order received, but their affect upon the +// request depends on whether they are a pre-decorator (react to the http.Response and then pass it +// along) or a post-decorator (pass the http.Response along and then react). +func DecorateResponder(r Responder, decorators ...RespondDecorator) Responder { + for _, decorate := range decorators { + r = decorate(r) + } + return r +} + +// Respond accepts an http.Response and a, possibly empty, set of RespondDecorators. +// It creates a Responder from the decorators it then applies to the passed http.Response. 
+func Respond(r *http.Response, decorators ...RespondDecorator) error { + if r == nil { + return nil + } + return CreateResponder(decorators...).Respond(r) +} + +// ByIgnoring returns a RespondDecorator that ignores the passed http.Response passing it unexamined +// to the next RespondDecorator. +func ByIgnoring() RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + return r.Respond(resp) + }) + } +} + +// ByCopying copies the contents of the http.Response Body into the passed bytes.Buffer as +// the Body is read. +func ByCopying(b *bytes.Buffer) RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if err == nil && resp != nil && resp.Body != nil { + resp.Body = TeeReadCloser(resp.Body, b) + } + return err + }) + } +} + +// ByDiscardingBody returns a RespondDecorator that first invokes the passed Responder after which +// it copies the remaining bytes (if any) in the response body to ioutil.Discard. Since the passed +// Responder is invoked prior to discarding the response body, the decorator may occur anywhere +// within the set. +func ByDiscardingBody() RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if err == nil && resp != nil && resp.Body != nil { + if _, err := io.Copy(ioutil.Discard, resp.Body); err != nil { + return fmt.Errorf("Error discarding the response body: %v", err) + } + } + return err + }) + } +} + +// ByClosing returns a RespondDecorator that first invokes the passed Responder after which it +// closes the response body. Since the passed Responder is invoked prior to closing the response +// body, the decorator may occur anywhere within the set. 
+func ByClosing() RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if resp != nil && resp.Body != nil { + if err := resp.Body.Close(); err != nil { + return fmt.Errorf("Error closing the response body: %v", err) + } + } + return err + }) + } +} + +// ByClosingIfError returns a RespondDecorator that first invokes the passed Responder after which +// it closes the response if the passed Responder returns an error and the response body exists. +func ByClosingIfError() RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if err != nil && resp != nil && resp.Body != nil { + if err := resp.Body.Close(); err != nil { + return fmt.Errorf("Error closing the response body: %v", err) + } + } + return err + }) + } +} + +// ByUnmarshallingBytes returns a RespondDecorator that copies the Bytes returned in the +// response Body into the value pointed to by v. +func ByUnmarshallingBytes(v *[]byte) RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if err == nil { + bytes, errInner := ioutil.ReadAll(resp.Body) + if errInner != nil { + err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner) + } else { + *v = bytes + } + } + return err + }) + } +} + +// ByUnmarshallingJSON returns a RespondDecorator that decodes a JSON document returned in the +// response Body into the value pointed to by v. 
+func ByUnmarshallingJSON(v interface{}) RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if err == nil { + b, errInner := ioutil.ReadAll(resp.Body) + // Some responses might include a BOM, remove for successful unmarshalling + b = bytes.TrimPrefix(b, []byte("\xef\xbb\xbf")) + if errInner != nil { + err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner) + } else if len(strings.Trim(string(b), " ")) > 0 { + errInner = json.Unmarshal(b, v) + if errInner != nil { + err = fmt.Errorf("Error occurred unmarshalling JSON - Error = '%v' JSON = '%s'", errInner, string(b)) + } + } + } + return err + }) + } +} + +// ByUnmarshallingXML returns a RespondDecorator that decodes an XML document returned in the +// response Body into the value pointed to by v. +func ByUnmarshallingXML(v interface{}) RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if err == nil { + b, errInner := ioutil.ReadAll(resp.Body) + if errInner != nil { + err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner) + } else { + errInner = xml.Unmarshal(b, v) + if errInner != nil { + err = fmt.Errorf("Error occurred unmarshalling Xml - Error = '%v' Xml = '%s'", errInner, string(b)) + } + } + } + return err + }) + } +} + +// WithErrorUnlessStatusCode returns a RespondDecorator that emits an error unless the response +// StatusCode is among the set passed. On error, response body is fully read into a buffer and +// presented in the returned error, as well as in the response body. +func WithErrorUnlessStatusCode(codes ...int) RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if err == nil && !ResponseHasStatusCode(resp, codes...)
{ + derr := NewErrorWithResponse("autorest", "WithErrorUnlessStatusCode", resp, "%v %v failed with %s", + resp.Request.Method, + resp.Request.URL, + resp.Status) + if resp.Body != nil { + defer resp.Body.Close() + b, _ := ioutil.ReadAll(resp.Body) + derr.ServiceError = b + resp.Body = ioutil.NopCloser(bytes.NewReader(b)) + } + err = derr + } + return err + }) + } +} + +// WithErrorUnlessOK returns a RespondDecorator that emits an error if the response StatusCode is +// anything other than HTTP 200. +func WithErrorUnlessOK() RespondDecorator { + return WithErrorUnlessStatusCode(http.StatusOK) +} + +// ExtractHeader extracts all values of the specified header from the http.Response. It returns an +// empty string slice if the passed http.Response is nil or the header does not exist. +func ExtractHeader(header string, resp *http.Response) []string { + if resp != nil && resp.Header != nil { + return resp.Header[http.CanonicalHeaderKey(header)] + } + return nil +} + +// ExtractHeaderValue extracts the first value of the specified header from the http.Response. It +// returns an empty string if the passed http.Response is nil or the header does not exist. +func ExtractHeaderValue(header string, resp *http.Response) string { + h := ExtractHeader(header, resp) + if len(h) > 0 { + return h[0] + } + return "" +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go new file mode 100644 index 00000000000..fa11dbed79b --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go @@ -0,0 +1,52 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "io" + "io/ioutil" + "net/http" +) + +// NewRetriableRequest returns a wrapper around an HTTP request that support retry logic. +func NewRetriableRequest(req *http.Request) *RetriableRequest { + return &RetriableRequest{req: req} +} + +// Request returns the wrapped HTTP request. +func (rr *RetriableRequest) Request() *http.Request { + return rr.req +} + +func (rr *RetriableRequest) prepareFromByteReader() (err error) { + // fall back to making a copy (only do this once) + b := []byte{} + if rr.req.ContentLength > 0 { + b = make([]byte, rr.req.ContentLength) + _, err = io.ReadFull(rr.req.Body, b) + if err != nil { + return err + } + } else { + b, err = ioutil.ReadAll(rr.req.Body) + if err != nil { + return err + } + } + rr.br = bytes.NewReader(b) + rr.req.Body = ioutil.NopCloser(rr.br) + return err +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go new file mode 100644 index 00000000000..7143cc61b58 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go @@ -0,0 +1,54 @@ +// +build !go1.8 + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package autorest + +import ( + "bytes" + "io/ioutil" + "net/http" +) + +// RetriableRequest provides facilities for retrying an HTTP request. +type RetriableRequest struct { + req *http.Request + br *bytes.Reader +} + +// Prepare signals that the request is about to be sent. +func (rr *RetriableRequest) Prepare() (err error) { + // preserve the request body; this is to support retry logic as + // the underlying transport will always close the request body + if rr.req.Body != nil { + if rr.br != nil { + _, err = rr.br.Seek(0, 0 /*io.SeekStart*/) + rr.req.Body = ioutil.NopCloser(rr.br) + } + if err != nil { + return err + } + if rr.br == nil { + // fall back to making a copy (only do this once) + err = rr.prepareFromByteReader() + } + } + return err +} + +func removeRequestBody(req *http.Request) { + req.Body = nil + req.ContentLength = 0 +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go new file mode 100644 index 00000000000..ae15c6bf962 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go @@ -0,0 +1,66 @@ +// +build go1.8 + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package autorest + +import ( + "bytes" + "io" + "io/ioutil" + "net/http" +) + +// RetriableRequest provides facilities for retrying an HTTP request. +type RetriableRequest struct { + req *http.Request + rc io.ReadCloser + br *bytes.Reader +} + +// Prepare signals that the request is about to be sent. +func (rr *RetriableRequest) Prepare() (err error) { + // preserve the request body; this is to support retry logic as + // the underlying transport will always close the request body + if rr.req.Body != nil { + if rr.rc != nil { + rr.req.Body = rr.rc + } else if rr.br != nil { + _, err = rr.br.Seek(0, io.SeekStart) + rr.req.Body = ioutil.NopCloser(rr.br) + } + if err != nil { + return err + } + if rr.req.GetBody != nil { + // this will allow us to preserve the body without having to + // make a copy.
note we need to do this on each iteration + rr.rc, err = rr.req.GetBody() + if err != nil { + return err + } + } else if rr.br == nil { + // fall back to making a copy (only do this once) + err = rr.prepareFromByteReader() + } + } + return err +} + +func removeRequestBody(req *http.Request) { + req.Body = nil + req.GetBody = nil + req.ContentLength = 0 +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/sender.go b/vendor/github.com/Azure/go-autorest/autorest/sender.go new file mode 100644 index 00000000000..5e595d7b1a3 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/sender.go @@ -0,0 +1,407 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "context" + "crypto/tls" + "fmt" + "log" + "math" + "net/http" + "net/http/cookiejar" + "strconv" + "time" + + "github.com/Azure/go-autorest/tracing" +) + +// used as a key type in context.WithValue() +type ctxSendDecorators struct{} + +// WithSendDecorators adds the specified SendDecorators to the provided context. +// If no SendDecorators are provided the context is unchanged. 
+func WithSendDecorators(ctx context.Context, sendDecorator []SendDecorator) context.Context { + if len(sendDecorator) == 0 { + return ctx + } + return context.WithValue(ctx, ctxSendDecorators{}, sendDecorator) +} + +// GetSendDecorators returns the SendDecorators in the provided context or the provided default SendDecorators. +func GetSendDecorators(ctx context.Context, defaultSendDecorators ...SendDecorator) []SendDecorator { + inCtx := ctx.Value(ctxSendDecorators{}) + if sd, ok := inCtx.([]SendDecorator); ok { + return sd + } + return defaultSendDecorators +} + +// Sender is the interface that wraps the Do method to send HTTP requests. +// +// The standard http.Client conforms to this interface. +type Sender interface { + Do(*http.Request) (*http.Response, error) +} + +// SenderFunc is a method that implements the Sender interface. +type SenderFunc func(*http.Request) (*http.Response, error) + +// Do implements the Sender interface on SenderFunc. +func (sf SenderFunc) Do(r *http.Request) (*http.Response, error) { + return sf(r) +} + +// SendDecorator takes and possibly decorates, by wrapping, a Sender. Decorators may affect the +// http.Request and pass it along or, first, pass the http.Request along then react to the +// http.Response result. +type SendDecorator func(Sender) Sender + +// CreateSender creates, decorates, and returns, as a Sender, the default http.Client. +func CreateSender(decorators ...SendDecorator) Sender { + return DecorateSender(sender(tls.RenegotiateNever), decorators...) +} + +// DecorateSender accepts a Sender and a, possibly empty, set of SendDecorators, which is applies to +// the Sender. Decorators are applied in the order received, but their affect upon the request +// depends on whether they are a pre-decorator (change the http.Request and then pass it along) or a +// post-decorator (pass the http.Request along and react to the results in http.Response). 
+func DecorateSender(s Sender, decorators ...SendDecorator) Sender { + for _, decorate := range decorators { + s = decorate(s) + } + return s +} + +// Send sends, by means of the default http.Client, the passed http.Request, returning the +// http.Response and possible error. It also accepts a, possibly empty, set of SendDecorators which +// it will apply the http.Client before invoking the Do method. +// +// Send is a convenience method and not recommended for production. Advanced users should use +// SendWithSender, passing and sharing their own Sender (e.g., instance of http.Client). +// +// Send will not poll or retry requests. +func Send(r *http.Request, decorators ...SendDecorator) (*http.Response, error) { + return SendWithSender(sender(tls.RenegotiateNever), r, decorators...) +} + +// SendWithSender sends the passed http.Request, through the provided Sender, returning the +// http.Response and possible error. It also accepts a, possibly empty, set of SendDecorators which +// it will apply the http.Client before invoking the Do method. +// +// SendWithSender will not poll or retry requests. +func SendWithSender(s Sender, r *http.Request, decorators ...SendDecorator) (*http.Response, error) { + return DecorateSender(s, decorators...).Do(r) +} + +func sender(renengotiation tls.RenegotiationSupport) Sender { + // Use behaviour compatible with DefaultTransport, but require TLS minimum version. 
+ defaultTransport := http.DefaultTransport.(*http.Transport) + transport := &http.Transport{ + Proxy: defaultTransport.Proxy, + DialContext: defaultTransport.DialContext, + MaxIdleConns: defaultTransport.MaxIdleConns, + IdleConnTimeout: defaultTransport.IdleConnTimeout, + TLSHandshakeTimeout: defaultTransport.TLSHandshakeTimeout, + ExpectContinueTimeout: defaultTransport.ExpectContinueTimeout, + TLSClientConfig: &tls.Config{ + MinVersion: tls.VersionTLS12, + Renegotiation: renengotiation, + }, + } + var roundTripper http.RoundTripper = transport + if tracing.IsEnabled() { + roundTripper = tracing.NewTransport(transport) + } + j, _ := cookiejar.New(nil) + return &http.Client{Jar: j, Transport: roundTripper} +} + +// AfterDelay returns a SendDecorator that delays for the passed time.Duration before +// invoking the Sender. The delay may be terminated by closing the optional channel on the +// http.Request. If canceled, no further Senders are invoked. +func AfterDelay(d time.Duration) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + if !DelayForBackoff(d, 0, r.Context().Done()) { + return nil, fmt.Errorf("autorest: AfterDelay canceled before full delay") + } + return s.Do(r) + }) + } +} + +// AsIs returns a SendDecorator that invokes the passed Sender without modifying the http.Request. +func AsIs() SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + return s.Do(r) + }) + } +} + +// DoCloseIfError returns a SendDecorator that first invokes the passed Sender after which +// it closes the response if the passed Sender returns an error and the response body exists. 
+func DoCloseIfError() SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + resp, err := s.Do(r) + if err != nil { + Respond(resp, ByDiscardingBody(), ByClosing()) + } + return resp, err + }) + } +} + +// DoErrorIfStatusCode returns a SendDecorator that emits an error if the response StatusCode is +// among the set passed. Since these are artificial errors, the response body may still require +// closing. +func DoErrorIfStatusCode(codes ...int) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + resp, err := s.Do(r) + if err == nil && ResponseHasStatusCode(resp, codes...) { + err = NewErrorWithResponse("autorest", "DoErrorIfStatusCode", resp, "%v %v failed with %s", + resp.Request.Method, + resp.Request.URL, + resp.Status) + } + return resp, err + }) + } +} + +// DoErrorUnlessStatusCode returns a SendDecorator that emits an error unless the response +// StatusCode is among the set passed. Since these are artificial errors, the response body +// may still require closing. +func DoErrorUnlessStatusCode(codes ...int) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + resp, err := s.Do(r) + if err == nil && !ResponseHasStatusCode(resp, codes...) { + err = NewErrorWithResponse("autorest", "DoErrorUnlessStatusCode", resp, "%v %v failed with %s", + resp.Request.Method, + resp.Request.URL, + resp.Status) + } + return resp, err + }) + } +} + +// DoPollForStatusCodes returns a SendDecorator that polls if the http.Response contains one of the +// passed status codes. It expects the http.Response to contain a Location header providing the +// URL at which to poll (using GET) and will poll until the time passed is equal to or greater than +// the supplied duration. 
It will delay between requests for the duration specified in the +// RetryAfter header or, if the header is absent, the passed delay. Polling may be canceled by +// closing the optional channel on the http.Request. +func DoPollForStatusCodes(duration time.Duration, delay time.Duration, codes ...int) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { + resp, err = s.Do(r) + + if err == nil && ResponseHasStatusCode(resp, codes...) { + r, err = NewPollingRequestWithContext(r.Context(), resp) + + for err == nil && ResponseHasStatusCode(resp, codes...) { + Respond(resp, + ByDiscardingBody(), + ByClosing()) + resp, err = SendWithSender(s, r, + AfterDelay(GetRetryAfter(resp, delay))) + } + } + + return resp, err + }) + } +} + +// DoRetryForAttempts returns a SendDecorator that retries a failed request for up to the specified +// number of attempts, exponentially backing off between requests using the supplied backoff +// time.Duration (which may be zero). Retrying may be canceled by closing the optional channel on +// the http.Request. +func DoRetryForAttempts(attempts int, backoff time.Duration) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { + rr := NewRetriableRequest(r) + for attempt := 0; attempt < attempts; attempt++ { + err = rr.Prepare() + if err != nil { + return resp, err + } + resp, err = s.Do(rr.Request()) + if err == nil { + return resp, err + } + if !DelayForBackoff(backoff, attempt, r.Context().Done()) { + return nil, r.Context().Err() + } + } + return resp, err + }) + } +} + +// DoRetryForStatusCodes returns a SendDecorator that retries for specified statusCodes for up to the specified +// number of attempts, exponentially backing off between requests using the supplied backoff +// time.Duration (which may be zero). Retrying may be canceled by cancelling the context on the http.Request. 
+// NOTE: Code http.StatusTooManyRequests (429) will *not* be counted against the number of attempts. +func DoRetryForStatusCodes(attempts int, backoff time.Duration, codes ...int) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + return doRetryForStatusCodesImpl(s, r, false, attempts, backoff, 0, codes...) + }) + } +} + +// DoRetryForStatusCodesWithCap returns a SendDecorator that retries for specified statusCodes for up to the +// specified number of attempts, exponentially backing off between requests using the supplied backoff +// time.Duration (which may be zero). To cap the maximum possible delay between iterations specify a value greater +// than zero for cap. Retrying may be canceled by cancelling the context on the http.Request. +func DoRetryForStatusCodesWithCap(attempts int, backoff, cap time.Duration, codes ...int) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + return doRetryForStatusCodesImpl(s, r, true, attempts, backoff, cap, codes...) + }) + } +} + +func doRetryForStatusCodesImpl(s Sender, r *http.Request, count429 bool, attempts int, backoff, cap time.Duration, codes ...int) (resp *http.Response, err error) { + rr := NewRetriableRequest(r) + // Increment to add the first call (attempts denotes number of retries) + for attempt := 0; attempt < attempts+1; { + err = rr.Prepare() + if err != nil { + return + } + resp, err = s.Do(rr.Request()) + // we want to retry if err is not nil (e.g. transient network failure). note that for failed authentication + // resp and err will both have a value, so in this case we don't want to retry as it will never succeed. + if err == nil && !ResponseHasStatusCode(resp, codes...) 
|| IsTokenRefreshError(err) { + return resp, err + } + delayed := DelayWithRetryAfter(resp, r.Context().Done()) + if !delayed && !DelayForBackoffWithCap(backoff, cap, attempt, r.Context().Done()) { + return resp, r.Context().Err() + } + // when count429 == false don't count a 429 against the number + // of attempts so that we continue to retry until it succeeds + if count429 || (resp == nil || resp.StatusCode != http.StatusTooManyRequests) { + attempt++ + } + } + return resp, err +} + +// DelayWithRetryAfter invokes time.After for the duration specified in the "Retry-After" header. +// The value of Retry-After can be either the number of seconds or a date in RFC1123 format. +// The function returns true after successfully waiting for the specified duration. If there is +// no Retry-After header or the wait is cancelled the return value is false. +func DelayWithRetryAfter(resp *http.Response, cancel <-chan struct{}) bool { + if resp == nil { + return false + } + var dur time.Duration + ra := resp.Header.Get("Retry-After") + if retryAfter, _ := strconv.Atoi(ra); retryAfter > 0 { + dur = time.Duration(retryAfter) * time.Second + } else if t, err := time.Parse(time.RFC1123, ra); err == nil { + dur = t.Sub(time.Now()) + } + if dur > 0 { + select { + case <-time.After(dur): + return true + case <-cancel: + return false + } + } + return false +} + +// DoRetryForDuration returns a SendDecorator that retries the request until the total time is equal +// to or greater than the specified duration, exponentially backing off between requests using the +// supplied backoff time.Duration (which may be zero). Retrying may be canceled by closing the +// optional channel on the http.Request. 
+func DoRetryForDuration(d time.Duration, backoff time.Duration) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { + rr := NewRetriableRequest(r) + end := time.Now().Add(d) + for attempt := 0; time.Now().Before(end); attempt++ { + err = rr.Prepare() + if err != nil { + return resp, err + } + resp, err = s.Do(rr.Request()) + if err == nil { + return resp, err + } + if !DelayForBackoff(backoff, attempt, r.Context().Done()) { + return nil, r.Context().Err() + } + } + return resp, err + }) + } +} + +// WithLogging returns a SendDecorator that implements simple before and after logging of the +// request. +func WithLogging(logger *log.Logger) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + logger.Printf("Sending %s %s", r.Method, r.URL) + resp, err := s.Do(r) + if err != nil { + logger.Printf("%s %s received error '%v'", r.Method, r.URL, err) + } else { + logger.Printf("%s %s received %s", r.Method, r.URL, resp.Status) + } + return resp, err + }) + } +} + +// DelayForBackoff invokes time.After for the supplied backoff duration raised to the power of +// passed attempt (i.e., an exponential backoff delay). Backoff duration is in seconds and can set +// to zero for no delay. The delay may be canceled by closing the passed channel. If terminated early, +// returns false. +// Note: Passing attempt 1 will result in doubling "backoff" duration. Treat this as a zero-based attempt +// count. +func DelayForBackoff(backoff time.Duration, attempt int, cancel <-chan struct{}) bool { + return DelayForBackoffWithCap(backoff, 0, attempt, cancel) +} + +// DelayForBackoffWithCap invokes time.After for the supplied backoff duration raised to the power of +// passed attempt (i.e., an exponential backoff delay). Backoff duration is in seconds and can set +// to zero for no delay. 
To cap the maximum possible delay specify a value greater than zero for cap. +// The delay may be canceled by closing the passed channel. If terminated early, returns false. +// Note: Passing attempt 1 will result in doubling "backoff" duration. Treat this as a zero-based attempt +// count. +func DelayForBackoffWithCap(backoff, cap time.Duration, attempt int, cancel <-chan struct{}) bool { + d := time.Duration(backoff.Seconds()*math.Pow(2, float64(attempt))) * time.Second + if cap > 0 && d > cap { + d = cap + } + select { + case <-time.After(d): + return true + case <-cancel: + return false + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/to/LICENSE b/vendor/github.com/Azure/go-autorest/autorest/to/LICENSE new file mode 100644 index 00000000000..b9d6a27ea92 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/to/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/Azure/go-autorest/autorest/to/convert.go b/vendor/github.com/Azure/go-autorest/autorest/to/convert.go new file mode 100644 index 00000000000..86694bd2555 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/to/convert.go @@ -0,0 +1,152 @@ +/* +Package to provides helpers to ease working with pointer values of marshalled structures. +*/ +package to + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// String returns a string value for the passed string pointer. It returns the empty string if the +// pointer is nil. +func String(s *string) string { + if s != nil { + return *s + } + return "" +} + +// StringPtr returns a pointer to the passed string. +func StringPtr(s string) *string { + return &s +} + +// StringSlice returns a string slice value for the passed string slice pointer. It returns a nil +// slice if the pointer is nil. +func StringSlice(s *[]string) []string { + if s != nil { + return *s + } + return nil +} + +// StringSlicePtr returns a pointer to the passed string slice. +func StringSlicePtr(s []string) *[]string { + return &s +} + +// StringMap returns a map of strings built from the map of string pointers. The empty string is +// used for nil pointers. 
+func StringMap(msp map[string]*string) map[string]string { + ms := make(map[string]string, len(msp)) + for k, sp := range msp { + if sp != nil { + ms[k] = *sp + } else { + ms[k] = "" + } + } + return ms +} + +// StringMapPtr returns a pointer to a map of string pointers built from the passed map of strings. +func StringMapPtr(ms map[string]string) *map[string]*string { + msp := make(map[string]*string, len(ms)) + for k, s := range ms { + msp[k] = StringPtr(s) + } + return &msp +} + +// Bool returns a bool value for the passed bool pointer. It returns false if the pointer is nil. +func Bool(b *bool) bool { + if b != nil { + return *b + } + return false +} + +// BoolPtr returns a pointer to the passed bool. +func BoolPtr(b bool) *bool { + return &b +} + +// Int returns an int value for the passed int pointer. It returns 0 if the pointer is nil. +func Int(i *int) int { + if i != nil { + return *i + } + return 0 +} + +// IntPtr returns a pointer to the passed int. +func IntPtr(i int) *int { + return &i +} + +// Int32 returns an int value for the passed int pointer. It returns 0 if the pointer is nil. +func Int32(i *int32) int32 { + if i != nil { + return *i + } + return 0 +} + +// Int32Ptr returns a pointer to the passed int32. +func Int32Ptr(i int32) *int32 { + return &i +} + +// Int64 returns an int value for the passed int pointer. It returns 0 if the pointer is nil. +func Int64(i *int64) int64 { + if i != nil { + return *i + } + return 0 +} + +// Int64Ptr returns a pointer to the passed int64. +func Int64Ptr(i int64) *int64 { + return &i +} + +// Float32 returns an int value for the passed int pointer. It returns 0.0 if the pointer is nil. +func Float32(i *float32) float32 { + if i != nil { + return *i + } + return 0.0 +} + +// Float32Ptr returns a pointer to the passed float32. +func Float32Ptr(i float32) *float32 { + return &i +} + +// Float64 returns an int value for the passed int pointer. It returns 0.0 if the pointer is nil. 
+func Float64(i *float64) float64 { + if i != nil { + return *i + } + return 0.0 +} + +// Float64Ptr returns a pointer to the passed float64. +func Float64Ptr(i float64) *float64 { + return &i +} + +// ByteSlicePtr returns a pointer to the passed byte slice. +func ByteSlicePtr(b []byte) *[]byte { + return &b +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/to/go.mod b/vendor/github.com/Azure/go-autorest/autorest/to/go.mod new file mode 100644 index 00000000000..a2054be36cd --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/to/go.mod @@ -0,0 +1,3 @@ +module github.com/Azure/go-autorest/autorest/to + +go 1.12 diff --git a/vendor/github.com/Azure/go-autorest/autorest/utility.go b/vendor/github.com/Azure/go-autorest/autorest/utility.go new file mode 100644 index 00000000000..08cf11c1189 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/utility.go @@ -0,0 +1,228 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +import ( + "bytes" + "encoding/json" + "encoding/xml" + "fmt" + "io" + "net" + "net/http" + "net/url" + "reflect" + "strings" + + "github.com/Azure/go-autorest/autorest/adal" +) + +// EncodedAs is a series of constants specifying various data encodings +type EncodedAs string + +const ( + // EncodedAsJSON states that data is encoded as JSON + EncodedAsJSON EncodedAs = "JSON" + + // EncodedAsXML states that data is encoded as Xml + EncodedAsXML EncodedAs = "XML" +) + +// Decoder defines the decoding method json.Decoder and xml.Decoder share +type Decoder interface { + Decode(v interface{}) error +} + +// NewDecoder creates a new decoder appropriate to the passed encoding. +// encodedAs specifies the type of encoding and r supplies the io.Reader containing the +// encoded data. +func NewDecoder(encodedAs EncodedAs, r io.Reader) Decoder { + if encodedAs == EncodedAsJSON { + return json.NewDecoder(r) + } else if encodedAs == EncodedAsXML { + return xml.NewDecoder(r) + } + return nil +} + +// CopyAndDecode decodes the data from the passed io.Reader while making a copy. Having a copy +// is especially useful if there is a chance the data will fail to decode. +// encodedAs specifies the expected encoding, r provides the io.Reader to the data, and v +// is the decoding destination. +func CopyAndDecode(encodedAs EncodedAs, r io.Reader, v interface{}) (bytes.Buffer, error) { + b := bytes.Buffer{} + return b, NewDecoder(encodedAs, io.TeeReader(r, &b)).Decode(v) +} + +// TeeReadCloser returns a ReadCloser that writes to w what it reads from rc. +// It utilizes io.TeeReader to copy the data read and has the same behavior when reading. +// Further, when it is closed, it ensures that rc is closed as well. 
func TeeReadCloser(rc io.ReadCloser, w io.Writer) io.ReadCloser {
	return &teeReadCloser{rc: rc, r: io.TeeReader(rc, w)}
}

// teeReadCloser couples an io.TeeReader with the Close method of the wrapped ReadCloser.
type teeReadCloser struct {
	rc io.ReadCloser
	r  io.Reader
}

// Read reads from the tee reader, mirroring the bytes into the writer supplied to TeeReadCloser.
func (t *teeReadCloser) Read(p []byte) (int, error) {
	return t.r.Read(p)
}

// Close closes the underlying ReadCloser.
func (t *teeReadCloser) Close() error {
	return t.rc.Close()
}

// containsInt reports whether n appears in ints.
func containsInt(ints []int, n int) bool {
	for _, v := range ints {
		if v == n {
			return true
		}
	}
	return false
}

// escapeValueStrings query-escapes every value in m, mutating and returning the same map.
func escapeValueStrings(m map[string]string) map[string]string {
	for k, v := range m {
		m[k] = url.QueryEscape(v)
	}
	return m
}

// ensureValueStrings converts every value of the map to its string form via ensureValueString.
func ensureValueStrings(mapOfInterface map[string]interface{}) map[string]string {
	mapOfStrings := make(map[string]string, len(mapOfInterface))
	for k, v := range mapOfInterface {
		mapOfStrings[k] = ensureValueString(v)
	}
	return mapOfStrings
}

// ensureValueString converts an arbitrary value to a string; nil becomes the empty string.
func ensureValueString(value interface{}) string {
	switch v := value.(type) {
	case nil:
		return ""
	case string:
		return v
	case []byte:
		return string(v)
	default:
		return fmt.Sprintf("%v", v)
	}
}

// MapToValues method converts map[string]interface{} to url.Values.
func MapToValues(m map[string]interface{}) url.Values {
	v := url.Values{}
	for key, value := range m {
		rv := reflect.ValueOf(value)
		switch rv.Kind() {
		case reflect.Array, reflect.Slice:
			// expand each element into its own value under the same key
			for i := 0; i < rv.Len(); i++ {
				v.Add(key, ensureValueString(rv.Index(i)))
			}
		default:
			v.Add(key, ensureValueString(value))
		}
	}
	return v
}

// AsStringSlice method converts interface{} to []string. It expects the parameter to be
// a slice or array of a type whose underlying type is string.
+func AsStringSlice(s interface{}) ([]string, error) { + v := reflect.ValueOf(s) + if v.Kind() != reflect.Slice && v.Kind() != reflect.Array { + return nil, NewError("autorest", "AsStringSlice", "the value's type is not an array.") + } + stringSlice := make([]string, 0, v.Len()) + + for i := 0; i < v.Len(); i++ { + stringSlice = append(stringSlice, v.Index(i).String()) + } + return stringSlice, nil +} + +// String method converts interface v to string. If interface is a list, it +// joins list elements using the separator. Note that only sep[0] will be used for +// joining if any separator is specified. +func String(v interface{}, sep ...string) string { + if len(sep) == 0 { + return ensureValueString(v) + } + stringSlice, ok := v.([]string) + if ok == false { + var err error + stringSlice, err = AsStringSlice(v) + if err != nil { + panic(fmt.Sprintf("autorest: Couldn't convert value to a string %s.", err)) + } + } + return ensureValueString(strings.Join(stringSlice, sep[0])) +} + +// Encode method encodes url path and query parameters. +func Encode(location string, v interface{}, sep ...string) string { + s := String(v, sep...) + switch strings.ToLower(location) { + case "path": + return pathEscape(s) + case "query": + return queryEscape(s) + default: + return s + } +} + +func pathEscape(s string) string { + return strings.Replace(url.QueryEscape(s), "+", "%20", -1) +} + +func queryEscape(s string) string { + return url.QueryEscape(s) +} + +// ChangeToGet turns the specified http.Request into a GET (it assumes it wasn't). +// This is mainly useful for long-running operations that use the Azure-AsyncOperation +// header, so we change the initial PUT into a GET to retrieve the final result. +func ChangeToGet(req *http.Request) *http.Request { + req.Method = "GET" + req.Body = nil + req.ContentLength = 0 + req.Header.Del("Content-Length") + return req +} + +// IsTokenRefreshError returns true if the specified error implements the TokenRefreshError +// interface. 
If err is a DetailedError it will walk the chain of Original errors. +func IsTokenRefreshError(err error) bool { + if _, ok := err.(adal.TokenRefreshError); ok { + return true + } + if de, ok := err.(DetailedError); ok { + return IsTokenRefreshError(de.Original) + } + return false +} + +// IsTemporaryNetworkError returns true if the specified error is a temporary network error or false +// if it's not. If the error doesn't implement the net.Error interface the return value is true. +func IsTemporaryNetworkError(err error) bool { + if netErr, ok := err.(net.Error); !ok || (ok && netErr.Temporary()) { + return true + } + return false +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/version.go b/vendor/github.com/Azure/go-autorest/autorest/version.go new file mode 100644 index 00000000000..56a29b2c5d0 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/version.go @@ -0,0 +1,41 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "fmt" + "runtime" +) + +const number = "v13.3.0" + +var ( + userAgent = fmt.Sprintf("Go/%s (%s-%s) go-autorest/%s", + runtime.Version(), + runtime.GOARCH, + runtime.GOOS, + number, + ) +) + +// UserAgent returns a string containing the Go version, system architecture and OS, and the go-autorest version. 
+func UserAgent() string { + return userAgent +} + +// Version returns the semantic version (see http://semver.org). +func Version() string { + return number +} diff --git a/vendor/github.com/Azure/go-autorest/logger/LICENSE b/vendor/github.com/Azure/go-autorest/logger/LICENSE new file mode 100644 index 00000000000..b9d6a27ea92 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/logger/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
// LevelType tells a logger the minimum level to log. When code reports a log entry,
// the LogLevel indicates the level of the log entry. The logger only records entries
// whose level is at least the level it was told to log. See the Log* constants.
// For example, if a logger is configured with LogError, then LogError, LogPanic,
// and LogFatal entries will be logged; lower level entries are ignored.
type LevelType uint32

const (
	// LogNone tells a logger not to log any entries passed to it.
	LogNone LevelType = iota

	// LogFatal tells a logger to log all LogFatal entries passed to it.
	LogFatal

	// LogPanic tells a logger to log all LogPanic and LogFatal entries passed to it.
	LogPanic

	// LogError tells a logger to log all LogError, LogPanic and LogFatal entries passed to it.
	LogError

	// LogWarning tells a logger to log all LogWarning, LogError, LogPanic and LogFatal entries passed to it.
	LogWarning

	// LogInfo tells a logger to log all LogInfo, LogWarning, LogError, LogPanic and LogFatal entries passed to it.
	LogInfo

	// LogDebug tells a logger to log all LogDebug, LogInfo, LogWarning, LogError, LogPanic and LogFatal entries passed to it.
	LogDebug
)

const (
	logNone    = "NONE"
	logFatal   = "FATAL"
	logPanic   = "PANIC"
	logError   = "ERROR"
	logWarning = "WARNING"
	logInfo    = "INFO"
	logDebug   = "DEBUG"
	logUnknown = "UNKNOWN"
)

// levelText maps each LevelType to its display name.
var levelText = map[LevelType]string{
	LogNone:    logNone,
	LogFatal:   logFatal,
	LogPanic:   logPanic,
	LogError:   logError,
	LogWarning: logWarning,
	LogInfo:    logInfo,
	LogDebug:   logDebug,
}

// levelValue maps an upper-cased level name to its LevelType. "NONE" is
// deliberately absent: ParseLevel rejects it, matching the original parser.
var levelValue = map[string]LevelType{
	logFatal:   LogFatal,
	logPanic:   LogPanic,
	logError:   LogError,
	logWarning: LogWarning,
	logInfo:    LogInfo,
	logDebug:   LogDebug,
}

// ParseLevel converts the specified string into the corresponding LevelType.
func ParseLevel(s string) (LevelType, error) {
	if lt, ok := levelValue[strings.ToUpper(s)]; ok {
		return lt, nil
	}
	return 0, fmt.Errorf("bad log level '%s'", s)
}

// String implements the stringer interface for LevelType.
func (lt LevelType) String() string {
	if s, ok := levelText[lt]; ok {
		return s
	}
	return logUnknown
}

// Filter defines functions for filtering HTTP request/response content.
type Filter struct {
	// URL returns a potentially modified string representation of a request URL.
	URL func(u *url.URL) string

	// Header returns a potentially modified set of values for the specified key.
	// To completely exclude the header key/values return false.
	Header func(key string, val []string) (bool, []string)

	// Body returns a potentially modified request/response body.
	Body func(b []byte) []byte
}

// processURL applies the URL filter if one is set; otherwise it stringifies u.
func (f Filter) processURL(u *url.URL) string {
	if f.URL != nil {
		return f.URL(u)
	}
	return u.String()
}

// processHeader applies the Header filter if one is set; otherwise the header is kept as-is.
func (f Filter) processHeader(k string, val []string) (bool, []string) {
	if f.Header != nil {
		return f.Header(k, val)
	}
	return true, val
}

// processBody applies the Body filter if one is set; otherwise the body is unchanged.
func (f Filter) processBody(b []byte) []byte {
	if f.Body != nil {
		return f.Body(b)
	}
	return b
}

// Writer defines methods for writing to a logging facility.
type Writer interface {
	// Writeln writes the specified message with the standard log entry header and new-line character.
	Writeln(level LevelType, message string)

	// Writef writes the specified format specifier with the standard log entry header and no new-line character.
	Writef(level LevelType, format string, a ...interface{})

	// WriteRequest writes the specified HTTP request to the logger if the log level is greater than
	// or equal to LogInfo. The request body, if set, is logged at level LogDebug or higher.
	// Custom filters can be specified to exclude URL, header, and/or body content from the log.
	// By default no request content is excluded.
	WriteRequest(req *http.Request, filter Filter)

	// WriteResponse writes the specified HTTP response to the logger if the log level is greater than
	// or equal to LogInfo. The response body, if set, is logged at level LogDebug or higher.
	// Custom filters can be specified to exclude URL, header, and/or body content from the log.
	// By default no response content is excluded.
	WriteResponse(resp *http.Response, filter Filter)
}

// Instance is the default log writer initialized during package init.
// This can be replaced with a custom implementation as required.
var Instance Writer

// logLevel holds the configured minimum level; default is LogNone.
var logLevel = LogNone

// Level returns the value specified in AZURE_GO_AUTOREST_LOG_LEVEL.
// If no value was specified the default value is LogNone.
// Custom loggers can call this to retrieve the configured log level.
func Level() LevelType {
	return logLevel
}

func init() {
	// separated for testing purposes
	initDefaultLogger()
}

// initDefaultLogger configures Instance from the AZURE_GO_SDK_LOG_LEVEL and
// AZURE_GO_SDK_LOG_FILE environment variables, defaulting to a no-op logger.
func initDefaultLogger() {
	// Start with the no-op logger so callers never need a nil check on Instance.
	Instance = nilLogger{}
	levelStr := strings.ToLower(os.Getenv("AZURE_GO_SDK_LOG_LEVEL"))
	if levelStr == "" {
		return
	}
	parsed, err := ParseLevel(levelStr)
	if err != nil {
		fmt.Fprintf(os.Stderr, "go-autorest: failed to parse log level: %s\n", err.Error())
		return
	}
	logLevel = parsed
	if logLevel == LogNone {
		return
	}
	// Destination defaults to stderr; "stdout" or a file path may override it.
	out := os.Stderr
	fileStr := os.Getenv("AZURE_GO_SDK_LOG_FILE")
	switch {
	case strings.EqualFold(fileStr, "stdout"):
		out = os.Stdout
	case fileStr != "":
		if f, cerr := os.Create(fileStr); cerr == nil {
			out = f
		} else {
			fmt.Fprintf(os.Stderr, "go-autorest: failed to create log file, using stderr: %s\n", cerr.Error())
		}
	}
	Instance = fileLogger{
		logLevel: logLevel,
		mu:       &sync.Mutex{},
		logFile:  out,
	}
}

// nilLogger discards everything written to it.
type nilLogger struct{}

func (nilLogger) Writeln(LevelType, string) {}

func (nilLogger) Writef(LevelType, string, ...interface{}) {}

func (nilLogger) WriteRequest(*http.Request, Filter) {}

func (nilLogger) WriteResponse(*http.Response, Filter) {}

// fileLogger writes to an *os.File directly (rather than wrapping a log.Logger)
// so the stream can be Sync'd after every write.
type fileLogger struct {
	logLevel LevelType
	mu       *sync.Mutex // for synchronizing writes to logFile
	logFile  *os.File
}

// Writeln writes the message followed by a newline, subject to the log level.
func (l fileLogger) Writeln(level LevelType, message string) {
	l.Writef(level, "%s\n", message)
}

// Writef formats and writes the entry with the standard header, subject to the log level.
func (l fileLogger) Writef(level LevelType, format string, a ...interface{}) {
	if level > l.logLevel {
		return
	}
	l.mu.Lock()
	defer l.mu.Unlock()
	fmt.Fprintf(l.logFile, "%s %s", entryHeader(level), fmt.Sprintf(format, a...))
	l.logFile.Sync()
}

// WriteRequest logs the request line, filtered headers, and (at LogDebug) the body.
func (l fileLogger) WriteRequest(req *http.Request, filter Filter) {
	if req == nil || l.logLevel < LogInfo {
		return
	}
	buf := &bytes.Buffer{}
	fmt.Fprintf(buf, "%s REQUEST: %s %s\n", entryHeader(LogInfo), req.Method, filter.processURL(req.URL))
	for k, v := range req.Header {
		if ok, mv := filter.processHeader(k, v); ok {
			fmt.Fprintf(buf, "%s: %s\n", k, strings.Join(mv, ","))
		}
	}
	if l.shouldLogBody(req.Header, req.Body) {
		body, err := ioutil.ReadAll(req.Body)
		if err != nil {
			fmt.Fprintf(buf, "failed to read body: %v\n", err)
		} else {
			fmt.Fprintln(buf, string(filter.processBody(body)))
			if s, ok := req.Body.(io.Seeker); ok {
				// rewind so the request body can still be sent
				s.Seek(0, io.SeekStart)
			} else {
				// the body was consumed; recreate it from the bytes we captured
				req.Body = ioutil.NopCloser(bytes.NewReader(body))
			}
		}
	}
	l.mu.Lock()
	defer l.mu.Unlock()
	fmt.Fprint(l.logFile, buf.String())
	l.logFile.Sync()
}

// WriteResponse logs the status line, filtered headers, and (at LogDebug) the body.
func (l fileLogger) WriteResponse(resp *http.Response, filter Filter) {
	if resp == nil || l.logLevel < LogInfo {
		return
	}
	buf := &bytes.Buffer{}
	fmt.Fprintf(buf, "%s RESPONSE: %d %s\n", entryHeader(LogInfo), resp.StatusCode, filter.processURL(resp.Request.URL))
	for k, v := range resp.Header {
		if ok, mv := filter.processHeader(k, v); ok {
			fmt.Fprintf(buf, "%s: %s\n", k, strings.Join(mv, ","))
		}
	}
	if l.shouldLogBody(resp.Header, resp.Body) {
		defer resp.Body.Close()
		body, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			fmt.Fprintf(buf, "failed to read body: %v\n", err)
		} else {
			fmt.Fprintln(buf, string(filter.processBody(body)))
			// the body was consumed; recreate it from the bytes we captured
			resp.Body = ioutil.NopCloser(bytes.NewReader(body))
		}
	}
	l.mu.Lock()
	defer l.mu.Unlock()
	fmt.Fprint(l.logFile, buf.String())
	l.logFile.Sync()
}

// shouldLogBody returns true if the provided body should be included in the log:
// the level must be at least LogDebug, the body non-nil, and not an octet stream.
func (l fileLogger) shouldLogBody(header http.Header, body io.ReadCloser) bool {
	ct := header.Get("Content-Type")
	return l.logLevel >= LogDebug && body != nil && !strings.Contains(ct, "application/octet-stream")
}

// entryHeader creates the standard header for log entries, containing a timestamp and the log level.
func entryHeader(level LevelType) string {
	// this format provides a fixed number of digits so the size of the timestamp is constant
	return fmt.Sprintf("(%s) %s:", time.Now().Format("2006-01-02T15:04:05.0000000Z07:00"), level.String())
}
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/Azure/go-autorest/tracing/go.mod b/vendor/github.com/Azure/go-autorest/tracing/go.mod new file mode 100644 index 00000000000..25c34c1085a --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/tracing/go.mod @@ -0,0 +1,3 @@ +module github.com/Azure/go-autorest/tracing + +go 1.12 diff --git a/vendor/github.com/Azure/go-autorest/tracing/tracing.go b/vendor/github.com/Azure/go-autorest/tracing/tracing.go new file mode 100644 index 00000000000..0e7a6e96254 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/tracing/tracing.go @@ -0,0 +1,67 @@ +package tracing + +// Copyright 2018 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "context" + "net/http" +) + +// Tracer represents an HTTP tracing facility. +type Tracer interface { + NewTransport(base *http.Transport) http.RoundTripper + StartSpan(ctx context.Context, name string) context.Context + EndSpan(ctx context.Context, httpStatusCode int, err error) +} + +var ( + tracer Tracer +) + +// Register will register the provided Tracer. Pass nil to unregister a Tracer. +func Register(t Tracer) { + tracer = t +} + +// IsEnabled returns true if a Tracer has been registered. 
+func IsEnabled() bool { + return tracer != nil +} + +// NewTransport creates a new instrumenting http.RoundTripper for the +// registered Tracer. If no Tracer has been registered it returns nil. +func NewTransport(base *http.Transport) http.RoundTripper { + if tracer != nil { + return tracer.NewTransport(base) + } + return nil +} + +// StartSpan starts a trace span with the specified name, associating it with the +// provided context. Has no effect if a Tracer has not been registered. +func StartSpan(ctx context.Context, name string) context.Context { + if tracer != nil { + return tracer.StartSpan(ctx, name) + } + return ctx +} + +// EndSpan ends a previously started span stored in the context. +// Has no effect if a Tracer has not been registered. +func EndSpan(ctx context.Context, httpStatusCode int, err error) { + if tracer != nil { + tracer.EndSpan(ctx, httpStatusCode, err) + } +} diff --git a/vendor/github.com/DataDog/datadog-go/LICENSE.txt b/vendor/github.com/DataDog/datadog-go/LICENSE.txt new file mode 100644 index 00000000000..97cd06d7fb1 --- /dev/null +++ b/vendor/github.com/DataDog/datadog-go/LICENSE.txt @@ -0,0 +1,19 @@ +Copyright (c) 2015 Datadog, Inc + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/DataDog/datadog-go/statsd/README.md b/vendor/github.com/DataDog/datadog-go/statsd/README.md new file mode 100644 index 00000000000..a2bca43b9af --- /dev/null +++ b/vendor/github.com/DataDog/datadog-go/statsd/README.md @@ -0,0 +1,5 @@ +## Overview + +Package `statsd` provides a Go [dogstatsd](http://docs.datadoghq.com/guides/dogstatsd/) client. Dogstatsd extends Statsd, adding tags +and histograms. + diff --git a/vendor/github.com/DataDog/datadog-go/statsd/options.go b/vendor/github.com/DataDog/datadog-go/statsd/options.go new file mode 100644 index 00000000000..2c5a59cd53f --- /dev/null +++ b/vendor/github.com/DataDog/datadog-go/statsd/options.go @@ -0,0 +1,109 @@ +package statsd + +import "time" + +var ( + // DefaultNamespace is the default value for the Namespace option + DefaultNamespace = "" + // DefaultTags is the default value for the Tags option + DefaultTags = []string{} + // DefaultBuffered is the default value for the Buffered option + DefaultBuffered = false + // DefaultMaxMessagesPerPayload is the default value for the MaxMessagesPerPayload option + DefaultMaxMessagesPerPayload = 16 + // DefaultAsyncUDS is the default value for the AsyncUDS option + DefaultAsyncUDS = false + // DefaultWriteTimeoutUDS is the default value for the WriteTimeoutUDS option + DefaultWriteTimeoutUDS = 1 * time.Millisecond +) + +// Options contains the configuration options for a client. +type Options struct { + // Namespace to prepend to all metrics, events and service checks name. + Namespace string + // Tags are global tags to be applied to every metrics, events and service checks. 
+ Tags []string + // Buffered allows to pack multiple DogStatsD messages in one payload. Messages will be buffered + // until the total size of the payload exceeds MaxMessagesPerPayload metrics, events and/or service + // checks or after 100ms since the payload startedto be built. + Buffered bool + // MaxMessagesPerPayload is the maximum number of metrics, events and/or service checks a single payload will contain. + // Note that this option only takes effect when the client is buffered. + MaxMessagesPerPayload int + // AsyncUDS allows to switch between async and blocking mode for UDS. + // Blocking mode allows for error checking but does not guarentee that calls won't block the execution. + AsyncUDS bool + // WriteTimeoutUDS is the timeout after which a UDS packet is dropped. + WriteTimeoutUDS time.Duration +} + +func resolveOptions(options []Option) (*Options, error) { + o := &Options{ + Namespace: DefaultNamespace, + Tags: DefaultTags, + Buffered: DefaultBuffered, + MaxMessagesPerPayload: DefaultMaxMessagesPerPayload, + AsyncUDS: DefaultAsyncUDS, + WriteTimeoutUDS: DefaultWriteTimeoutUDS, + } + + for _, option := range options { + err := option(o) + if err != nil { + return nil, err + } + } + + return o, nil +} + +// Option is a client option. Can return an error if validation fails. +type Option func(*Options) error + +// WithNamespace sets the Namespace option. +func WithNamespace(namespace string) Option { + return func(o *Options) error { + o.Namespace = namespace + return nil + } +} + +// WithTags sets the Tags option. +func WithTags(tags []string) Option { + return func(o *Options) error { + o.Tags = tags + return nil + } +} + +// Buffered sets the Buffered option. +func Buffered() Option { + return func(o *Options) error { + o.Buffered = true + return nil + } +} + +// WithMaxMessagesPerPayload sets the MaxMessagesPerPayload option. 
+func WithMaxMessagesPerPayload(maxMessagesPerPayload int) Option { + return func(o *Options) error { + o.MaxMessagesPerPayload = maxMessagesPerPayload + return nil + } +} + +// WithAsyncUDS sets the AsyncUDS option. +func WithAsyncUDS() Option { + return func(o *Options) error { + o.AsyncUDS = true + return nil + } +} + +// WithWriteTimeoutUDS sets the WriteTimeoutUDS option. +func WithWriteTimeoutUDS(writeTimeoutUDS time.Duration) Option { + return func(o *Options) error { + o.WriteTimeoutUDS = writeTimeoutUDS + return nil + } +} diff --git a/vendor/github.com/DataDog/datadog-go/statsd/statsd.go b/vendor/github.com/DataDog/datadog-go/statsd/statsd.go new file mode 100644 index 00000000000..71a113cfcef --- /dev/null +++ b/vendor/github.com/DataDog/datadog-go/statsd/statsd.go @@ -0,0 +1,757 @@ +// Copyright 2013 Ooyala, Inc. + +/* +Package statsd provides a Go dogstatsd client. Dogstatsd extends the popular statsd, +adding tags and histograms and pushing upstream to Datadog. + +Refer to http://docs.datadoghq.com/guides/dogstatsd/ for information about DogStatsD. + +Example Usage: + + // Create the client + c, err := statsd.New("127.0.0.1:8125") + if err != nil { + log.Fatal(err) + } + // Prefix every metric with the app name + c.Namespace = "flubber." + // Send the EC2 availability zone as a tag with every metric + c.Tags = append(c.Tags, "us-east-1a") + err = c.Gauge("request.duration", 1.2, nil, 1) + +statsd is based on go-statsd-client. +*/ +package statsd + +import ( + "bytes" + "errors" + "fmt" + "io" + "math/rand" + "os" + "strconv" + "strings" + "sync" + "time" +) + +/* +OptimalPayloadSize defines the optimal payload size for a UDP datagram, 1432 bytes +is optimal for regular networks with an MTU of 1500 so datagrams don't get +fragmented. It's generally recommended not to fragment UDP datagrams as losing +a single fragment will cause the entire datagram to be lost. 
+ +This can be increased if your network has a greater MTU or you don't mind UDP +datagrams getting fragmented. The practical limit is MaxUDPPayloadSize +*/ +const OptimalPayloadSize = 1432 + +/* +MaxUDPPayloadSize defines the maximum payload size for a UDP datagram. +Its value comes from the calculation: 65535 bytes Max UDP datagram size - +8byte UDP header - 60byte max IP headers +any number greater than that will see frames being cut out. +*/ +const MaxUDPPayloadSize = 65467 + +/* +UnixAddressPrefix holds the prefix to use to enable Unix Domain Socket +traffic instead of UDP. +*/ +const UnixAddressPrefix = "unix://" + +// Client-side entity ID injection for container tagging +const ( + entityIDEnvName = "DD_ENTITY_ID" + entityIDTagName = "dd.internal.entity_id" +) + +/* +Stat suffixes +*/ +var ( + gaugeSuffix = []byte("|g") + countSuffix = []byte("|c") + histogramSuffix = []byte("|h") + distributionSuffix = []byte("|d") + decrSuffix = []byte("-1|c") + incrSuffix = []byte("1|c") + setSuffix = []byte("|s") + timingSuffix = []byte("|ms") +) + +// A statsdWriter offers a standard interface regardless of the underlying +// protocol. For now UDS and UPD writers are available. +type statsdWriter interface { + Write(data []byte) (n int, err error) + SetWriteTimeout(time.Duration) error + Close() error +} + +// A Client is a handle for sending messages to dogstatsd. It is safe to +// use one Client from multiple goroutines simultaneously. +type Client struct { + // Writer handles the underlying networking protocol + writer statsdWriter + // Namespace to prepend to all statsd calls + Namespace string + // Tags are global tags to be added to every statsd call + Tags []string + // skipErrors turns off error passing and allows UDS to emulate UDP behaviour + SkipErrors bool + // BufferLength is the length of the buffer in commands. 
+ bufferLength int + flushTime time.Duration + commands [][]byte + buffer bytes.Buffer + stop chan struct{} + sync.Mutex +} + +// New returns a pointer to a new Client given an addr in the format "hostname:port" or +// "unix:///path/to/socket". +func New(addr string, options ...Option) (*Client, error) { + o, err := resolveOptions(options) + if err != nil { + return nil, err + } + + var w statsdWriter + + if !strings.HasPrefix(addr, UnixAddressPrefix) { + w, err = newUDPWriter(addr) + } else if o.AsyncUDS { + w, err = newAsyncUdsWriter(addr[len(UnixAddressPrefix)-1:]) + } else { + w, err = newBlockingUdsWriter(addr[len(UnixAddressPrefix)-1:]) + } + if err != nil { + return nil, err + } + w.SetWriteTimeout(o.WriteTimeoutUDS) + + c := Client{ + Namespace: o.Namespace, + Tags: o.Tags, + writer: w, + } + + // Inject DD_ENTITY_ID as a constant tag if found + entityID := os.Getenv(entityIDEnvName) + if entityID != "" { + entityTag := fmt.Sprintf("%s:%s", entityIDTagName, entityID) + c.Tags = append(c.Tags, entityTag) + } + + if o.Buffered { + c.bufferLength = o.MaxMessagesPerPayload + c.commands = make([][]byte, 0, o.MaxMessagesPerPayload) + c.flushTime = time.Millisecond * 100 + c.stop = make(chan struct{}, 1) + go c.watch() + } + + return &c, nil +} + +// NewWithWriter creates a new Client with given writer. Writer is a +// io.WriteCloser + SetWriteTimeout(time.Duration) error +func NewWithWriter(w statsdWriter) (*Client, error) { + client := &Client{writer: w, SkipErrors: false} + + // Inject DD_ENTITY_ID as a constant tag if found + entityID := os.Getenv(entityIDEnvName) + if entityID != "" { + entityTag := fmt.Sprintf("%s:%s", entityIDTagName, entityID) + client.Tags = append(client.Tags, entityTag) + } + + return client, nil +} + +// NewBuffered returns a Client that buffers its output and sends it in chunks. +// Buflen is the length of the buffer in number of commands. 
+// +// When addr is empty, the client will default to a UDP client and use the DD_AGENT_HOST +// and (optionally) the DD_DOGSTATSD_PORT environment variables to build the target address. +func NewBuffered(addr string, buflen int) (*Client, error) { + return New(addr, Buffered(), WithMaxMessagesPerPayload(buflen)) +} + +// format a message from its name, value, tags and rate. Also adds global +// namespace and tags. +func (c *Client) format(name string, value interface{}, suffix []byte, tags []string, rate float64) []byte { + // preallocated buffer, stack allocated as long as it doesn't escape + buf := make([]byte, 0, 200) + + if c.Namespace != "" { + buf = append(buf, c.Namespace...) + } + buf = append(buf, name...) + buf = append(buf, ':') + + switch val := value.(type) { + case float64: + buf = strconv.AppendFloat(buf, val, 'f', 6, 64) + + case int64: + buf = strconv.AppendInt(buf, val, 10) + + case string: + buf = append(buf, val...) + + default: + // do nothing + } + buf = append(buf, suffix...) + + if rate < 1 { + buf = append(buf, "|@"...) + buf = strconv.AppendFloat(buf, rate, 'f', -1, 64) + } + + buf = appendTagString(buf, c.Tags, tags) + + // non-zeroing copy to avoid referencing a larger than necessary underlying array + return append([]byte(nil), buf...) +} + +// SetWriteTimeout allows the user to set a custom UDS write timeout. Not supported for UDP. 
+func (c *Client) SetWriteTimeout(d time.Duration) error { + if c == nil { + return fmt.Errorf("Client is nil") + } + return c.writer.SetWriteTimeout(d) +} + +func (c *Client) watch() { + ticker := time.NewTicker(c.flushTime) + + for { + select { + case <-ticker.C: + c.Lock() + if len(c.commands) > 0 { + // FIXME: eating error here + c.flushLocked() + } + c.Unlock() + case <-c.stop: + ticker.Stop() + return + } + } +} + +func (c *Client) append(cmd []byte) error { + c.Lock() + defer c.Unlock() + c.commands = append(c.commands, cmd) + // if we should flush, lets do it + if len(c.commands) == c.bufferLength { + if err := c.flushLocked(); err != nil { + return err + } + } + return nil +} + +func (c *Client) joinMaxSize(cmds [][]byte, sep string, maxSize int) ([][]byte, []int) { + c.buffer.Reset() //clear buffer + + var frames [][]byte + var ncmds []int + sepBytes := []byte(sep) + sepLen := len(sep) + + elem := 0 + for _, cmd := range cmds { + needed := len(cmd) + + if elem != 0 { + needed = needed + sepLen + } + + if c.buffer.Len()+needed <= maxSize { + if elem != 0 { + c.buffer.Write(sepBytes) + } + c.buffer.Write(cmd) + elem++ + } else { + frames = append(frames, copyAndResetBuffer(&c.buffer)) + ncmds = append(ncmds, elem) + // if cmd is bigger than maxSize it will get flushed on next loop + c.buffer.Write(cmd) + elem = 1 + } + } + + //add whatever is left! if there's actually something + if c.buffer.Len() > 0 { + frames = append(frames, copyAndResetBuffer(&c.buffer)) + ncmds = append(ncmds, elem) + } + + return frames, ncmds +} + +func copyAndResetBuffer(buf *bytes.Buffer) []byte { + tmpBuf := make([]byte, buf.Len()) + copy(tmpBuf, buf.Bytes()) + buf.Reset() + return tmpBuf +} + +// Flush forces a flush of the pending commands in the buffer +func (c *Client) Flush() error { + if c == nil { + return fmt.Errorf("Client is nil") + } + c.Lock() + defer c.Unlock() + return c.flushLocked() +} + +// flush the commands in the buffer. Lock must be held by caller. 
+func (c *Client) flushLocked() error { + frames, flushable := c.joinMaxSize(c.commands, "\n", OptimalPayloadSize) + var err error + cmdsFlushed := 0 + for i, data := range frames { + _, e := c.writer.Write(data) + if e != nil { + err = e + break + } + cmdsFlushed += flushable[i] + } + + // clear the slice with a slice op, doesn't realloc + if cmdsFlushed == len(c.commands) { + c.commands = c.commands[:0] + } else { + //this case will cause a future realloc... + // drop problematic command though (sorry). + c.commands = c.commands[cmdsFlushed+1:] + } + return err +} + +func (c *Client) sendMsg(msg []byte) error { + // return an error if message is bigger than MaxUDPPayloadSize + if len(msg) > MaxUDPPayloadSize { + return errors.New("message size exceeds MaxUDPPayloadSize") + } + + // if this client is buffered, then we'll just append this + if c.bufferLength > 0 { + return c.append(msg) + } + + _, err := c.writer.Write(msg) + + if c.SkipErrors { + return nil + } + return err +} + +// send handles sampling and sends the message over UDP. It also adds global namespace prefixes and tags. +func (c *Client) send(name string, value interface{}, suffix []byte, tags []string, rate float64) error { + if c == nil { + return fmt.Errorf("Client is nil") + } + if rate < 1 && rand.Float64() > rate { + return nil + } + data := c.format(name, value, suffix, tags, rate) + return c.sendMsg(data) +} + +// Gauge measures the value of a metric at a particular time. +func (c *Client) Gauge(name string, value float64, tags []string, rate float64) error { + return c.send(name, value, gaugeSuffix, tags, rate) +} + +// Count tracks how many times something happened per second. +func (c *Client) Count(name string, value int64, tags []string, rate float64) error { + return c.send(name, value, countSuffix, tags, rate) +} + +// Histogram tracks the statistical distribution of a set of values on each host. 
+func (c *Client) Histogram(name string, value float64, tags []string, rate float64) error { + return c.send(name, value, histogramSuffix, tags, rate) +} + +// Distribution tracks the statistical distribution of a set of values across your infrastructure. +func (c *Client) Distribution(name string, value float64, tags []string, rate float64) error { + return c.send(name, value, distributionSuffix, tags, rate) +} + +// Decr is just Count of -1 +func (c *Client) Decr(name string, tags []string, rate float64) error { + return c.send(name, nil, decrSuffix, tags, rate) +} + +// Incr is just Count of 1 +func (c *Client) Incr(name string, tags []string, rate float64) error { + return c.send(name, nil, incrSuffix, tags, rate) +} + +// Set counts the number of unique elements in a group. +func (c *Client) Set(name string, value string, tags []string, rate float64) error { + return c.send(name, value, setSuffix, tags, rate) +} + +// Timing sends timing information, it is an alias for TimeInMilliseconds +func (c *Client) Timing(name string, value time.Duration, tags []string, rate float64) error { + return c.TimeInMilliseconds(name, value.Seconds()*1000, tags, rate) +} + +// TimeInMilliseconds sends timing information in milliseconds. +// It is flushed by statsd with percentiles, mean and other info (https://github.com/etsy/statsd/blob/master/docs/metric_types.md#timing) +func (c *Client) TimeInMilliseconds(name string, value float64, tags []string, rate float64) error { + return c.send(name, value, timingSuffix, tags, rate) +} + +// Event sends the provided Event. +func (c *Client) Event(e *Event) error { + if c == nil { + return fmt.Errorf("Client is nil") + } + stat, err := e.Encode(c.Tags...) + if err != nil { + return err + } + return c.sendMsg([]byte(stat)) +} + +// SimpleEvent sends an event with the provided title and text. 
+func (c *Client) SimpleEvent(title, text string) error { + e := NewEvent(title, text) + return c.Event(e) +} + +// ServiceCheck sends the provided ServiceCheck. +func (c *Client) ServiceCheck(sc *ServiceCheck) error { + if c == nil { + return fmt.Errorf("Client is nil") + } + stat, err := sc.Encode(c.Tags...) + if err != nil { + return err + } + return c.sendMsg([]byte(stat)) +} + +// SimpleServiceCheck sends an serviceCheck with the provided name and status. +func (c *Client) SimpleServiceCheck(name string, status ServiceCheckStatus) error { + sc := NewServiceCheck(name, status) + return c.ServiceCheck(sc) +} + +// Close the client connection. +func (c *Client) Close() error { + if c == nil { + return fmt.Errorf("Client is nil") + } + select { + case c.stop <- struct{}{}: + default: + } + + // if this client is buffered, flush before closing the writer + if c.bufferLength > 0 { + if err := c.Flush(); err != nil { + return err + } + } + + return c.writer.Close() +} + +// Events support +// EventAlertType and EventAlertPriority became exported types after this issue was submitted: https://github.com/DataDog/datadog-go/issues/41 +// The reason why they got exported is so that client code can directly use the types. + +// EventAlertType is the alert type for events +type EventAlertType string + +const ( + // Info is the "info" AlertType for events + Info EventAlertType = "info" + // Error is the "error" AlertType for events + Error EventAlertType = "error" + // Warning is the "warning" AlertType for events + Warning EventAlertType = "warning" + // Success is the "success" AlertType for events + Success EventAlertType = "success" +) + +// EventPriority is the event priority for events +type EventPriority string + +const ( + // Normal is the "normal" Priority for events + Normal EventPriority = "normal" + // Low is the "low" Priority for events + Low EventPriority = "low" +) + +// An Event is an object that can be posted to your DataDog event stream. 
+type Event struct { + // Title of the event. Required. + Title string + // Text is the description of the event. Required. + Text string + // Timestamp is a timestamp for the event. If not provided, the dogstatsd + // server will set this to the current time. + Timestamp time.Time + // Hostname for the event. + Hostname string + // AggregationKey groups this event with others of the same key. + AggregationKey string + // Priority of the event. Can be statsd.Low or statsd.Normal. + Priority EventPriority + // SourceTypeName is a source type for the event. + SourceTypeName string + // AlertType can be statsd.Info, statsd.Error, statsd.Warning, or statsd.Success. + // If absent, the default value applied by the dogstatsd server is Info. + AlertType EventAlertType + // Tags for the event. + Tags []string +} + +// NewEvent creates a new event with the given title and text. Error checking +// against these values is done at send-time, or upon running e.Check. +func NewEvent(title, text string) *Event { + return &Event{ + Title: title, + Text: text, + } +} + +// Check verifies that an event is valid. +func (e Event) Check() error { + if len(e.Title) == 0 { + return fmt.Errorf("statsd.Event title is required") + } + if len(e.Text) == 0 { + return fmt.Errorf("statsd.Event text is required") + } + return nil +} + +// Encode returns the dogstatsd wire protocol representation for an event. +// Tags may be passed which will be added to the encoded output but not to +// the Event's list of tags, eg. for default tags. 
+func (e Event) Encode(tags ...string) (string, error) { + err := e.Check() + if err != nil { + return "", err + } + text := e.escapedText() + + var buffer bytes.Buffer + buffer.WriteString("_e{") + buffer.WriteString(strconv.FormatInt(int64(len(e.Title)), 10)) + buffer.WriteRune(',') + buffer.WriteString(strconv.FormatInt(int64(len(text)), 10)) + buffer.WriteString("}:") + buffer.WriteString(e.Title) + buffer.WriteRune('|') + buffer.WriteString(text) + + if !e.Timestamp.IsZero() { + buffer.WriteString("|d:") + buffer.WriteString(strconv.FormatInt(int64(e.Timestamp.Unix()), 10)) + } + + if len(e.Hostname) != 0 { + buffer.WriteString("|h:") + buffer.WriteString(e.Hostname) + } + + if len(e.AggregationKey) != 0 { + buffer.WriteString("|k:") + buffer.WriteString(e.AggregationKey) + + } + + if len(e.Priority) != 0 { + buffer.WriteString("|p:") + buffer.WriteString(string(e.Priority)) + } + + if len(e.SourceTypeName) != 0 { + buffer.WriteString("|s:") + buffer.WriteString(e.SourceTypeName) + } + + if len(e.AlertType) != 0 { + buffer.WriteString("|t:") + buffer.WriteString(string(e.AlertType)) + } + + writeTagString(&buffer, tags, e.Tags) + + return buffer.String(), nil +} + +// ServiceCheckStatus support +type ServiceCheckStatus byte + +const ( + // Ok is the "ok" ServiceCheck status + Ok ServiceCheckStatus = 0 + // Warn is the "warning" ServiceCheck status + Warn ServiceCheckStatus = 1 + // Critical is the "critical" ServiceCheck status + Critical ServiceCheckStatus = 2 + // Unknown is the "unknown" ServiceCheck status + Unknown ServiceCheckStatus = 3 +) + +// An ServiceCheck is an object that contains status of DataDog service check. +type ServiceCheck struct { + // Name of the service check. Required. + Name string + // Status of service check. Required. + Status ServiceCheckStatus + // Timestamp is a timestamp for the serviceCheck. If not provided, the dogstatsd + // server will set this to the current time. + Timestamp time.Time + // Hostname for the serviceCheck. 
+ Hostname string + // A message describing the current state of the serviceCheck. + Message string + // Tags for the serviceCheck. + Tags []string +} + +// NewServiceCheck creates a new serviceCheck with the given name and status. Error checking +// against these values is done at send-time, or upon running sc.Check. +func NewServiceCheck(name string, status ServiceCheckStatus) *ServiceCheck { + return &ServiceCheck{ + Name: name, + Status: status, + } +} + +// Check verifies that an event is valid. +func (sc ServiceCheck) Check() error { + if len(sc.Name) == 0 { + return fmt.Errorf("statsd.ServiceCheck name is required") + } + if byte(sc.Status) < 0 || byte(sc.Status) > 3 { + return fmt.Errorf("statsd.ServiceCheck status has invalid value") + } + return nil +} + +// Encode returns the dogstatsd wire protocol representation for an serviceCheck. +// Tags may be passed which will be added to the encoded output but not to +// the Event's list of tags, eg. for default tags. +func (sc ServiceCheck) Encode(tags ...string) (string, error) { + err := sc.Check() + if err != nil { + return "", err + } + message := sc.escapedMessage() + + var buffer bytes.Buffer + buffer.WriteString("_sc|") + buffer.WriteString(sc.Name) + buffer.WriteRune('|') + buffer.WriteString(strconv.FormatInt(int64(sc.Status), 10)) + + if !sc.Timestamp.IsZero() { + buffer.WriteString("|d:") + buffer.WriteString(strconv.FormatInt(int64(sc.Timestamp.Unix()), 10)) + } + + if len(sc.Hostname) != 0 { + buffer.WriteString("|h:") + buffer.WriteString(sc.Hostname) + } + + writeTagString(&buffer, tags, sc.Tags) + + if len(message) != 0 { + buffer.WriteString("|m:") + buffer.WriteString(message) + } + + return buffer.String(), nil +} + +func (e Event) escapedText() string { + return strings.Replace(e.Text, "\n", "\\n", -1) +} + +func (sc ServiceCheck) escapedMessage() string { + msg := strings.Replace(sc.Message, "\n", "\\n", -1) + return strings.Replace(msg, "m:", `m\:`, -1) +} + +func removeNewlines(str 
string) string { + return strings.Replace(str, "\n", "", -1) +} + +func writeTagString(w io.Writer, tagList1, tagList2 []string) { + // the tag lists may be shared with other callers, so we cannot modify + // them in any way (which means we cannot append to them either) + // therefore we must make an entirely separate copy just for this call + totalLen := len(tagList1) + len(tagList2) + if totalLen == 0 { + return + } + tags := make([]string, 0, totalLen) + tags = append(tags, tagList1...) + tags = append(tags, tagList2...) + + io.WriteString(w, "|#") + io.WriteString(w, removeNewlines(tags[0])) + for _, tag := range tags[1:] { + io.WriteString(w, ",") + io.WriteString(w, removeNewlines(tag)) + } +} + +func appendTagString(buf []byte, tagList1, tagList2 []string) []byte { + if len(tagList1) == 0 { + if len(tagList2) == 0 { + return buf + } + tagList1 = tagList2 + tagList2 = nil + } + + buf = append(buf, "|#"...) + buf = appendWithoutNewlines(buf, tagList1[0]) + for _, tag := range tagList1[1:] { + buf = append(buf, ',') + buf = appendWithoutNewlines(buf, tag) + } + for _, tag := range tagList2 { + buf = append(buf, ',') + buf = appendWithoutNewlines(buf, tag) + } + return buf +} + +func appendWithoutNewlines(buf []byte, s string) []byte { + // fastpath for strings without newlines + if strings.IndexByte(s, '\n') == -1 { + return append(buf, s...) 
+ } + + for _, b := range []byte(s) { + if b != '\n' { + buf = append(buf, b) + } + } + return buf +} diff --git a/vendor/github.com/DataDog/datadog-go/statsd/udp.go b/vendor/github.com/DataDog/datadog-go/statsd/udp.go new file mode 100644 index 00000000000..9ddff421c70 --- /dev/null +++ b/vendor/github.com/DataDog/datadog-go/statsd/udp.go @@ -0,0 +1,73 @@ +package statsd + +import ( + "errors" + "fmt" + "net" + "os" + "time" +) + +const ( + autoHostEnvName = "DD_AGENT_HOST" + autoPortEnvName = "DD_DOGSTATSD_PORT" + defaultUDPPort = "8125" +) + +// udpWriter is an internal class wrapping around management of UDP connection +type udpWriter struct { + conn net.Conn +} + +// New returns a pointer to a new udpWriter given an addr in the format "hostname:port". +func newUDPWriter(addr string) (*udpWriter, error) { + if addr == "" { + addr = addressFromEnvironment() + } + if addr == "" { + return nil, errors.New("No address passed and autodetection from environment failed") + } + + udpAddr, err := net.ResolveUDPAddr("udp", addr) + if err != nil { + return nil, err + } + conn, err := net.DialUDP("udp", nil, udpAddr) + if err != nil { + return nil, err + } + writer := &udpWriter{conn: conn} + return writer, nil +} + +// SetWriteTimeout is not needed for UDP, returns error +func (w *udpWriter) SetWriteTimeout(d time.Duration) error { + return errors.New("SetWriteTimeout: not supported for UDP connections") +} + +// Write data to the UDP connection with no error handling +func (w *udpWriter) Write(data []byte) (int, error) { + return w.conn.Write(data) +} + +func (w *udpWriter) Close() error { + return w.conn.Close() +} + +func (w *udpWriter) remoteAddr() net.Addr { + return w.conn.RemoteAddr() +} + +func addressFromEnvironment() string { + autoHost := os.Getenv(autoHostEnvName) + if autoHost == "" { + return "" + } + + autoPort := os.Getenv(autoPortEnvName) + if autoPort == "" { + autoPort = defaultUDPPort + } + + return fmt.Sprintf("%s:%s", 
autoHost, autoPort) +} diff --git a/vendor/github.com/DataDog/datadog-go/statsd/uds.go b/vendor/github.com/DataDog/datadog-go/statsd/uds.go new file mode 100644 index 00000000000..cc2537e000f --- /dev/null +++ b/vendor/github.com/DataDog/datadog-go/statsd/uds.go @@ -0,0 +1,11 @@ +package statsd + +import ( + "time" +) + +/* +UDSTimeout holds the default timeout for UDS socket writes, as they can get +blocking when the receiving buffer is full. +*/ +const defaultUDSTimeout = 1 * time.Millisecond diff --git a/vendor/github.com/DataDog/datadog-go/statsd/uds_async.go b/vendor/github.com/DataDog/datadog-go/statsd/uds_async.go new file mode 100644 index 00000000000..39d4ccb2344 --- /dev/null +++ b/vendor/github.com/DataDog/datadog-go/statsd/uds_async.go @@ -0,0 +1,113 @@ +package statsd + +import ( + "fmt" + "net" + "time" +) + +// asyncUdsWriter is an internal class wrapping around management of UDS connection +type asyncUdsWriter struct { + // Address to send metrics to, needed to allow reconnection on error + addr net.Addr + // Established connection object, or nil if not connected yet + conn net.Conn + // write timeout + writeTimeout time.Duration + // datagramQueue is the queue of datagrams ready to be sent + datagramQueue chan []byte + stopChan chan struct{} +} + +// New returns a pointer to a new asyncUdsWriter given a socket file path as addr. 
+func newAsyncUdsWriter(addr string) (*asyncUdsWriter, error) { + udsAddr, err := net.ResolveUnixAddr("unixgram", addr) + if err != nil { + return nil, err + } + + writer := &asyncUdsWriter{ + addr: udsAddr, + conn: nil, + writeTimeout: defaultUDSTimeout, + // 8192 * 8KB = 65.5MB + datagramQueue: make(chan []byte, 8192), + stopChan: make(chan struct{}, 1), + } + + go writer.sendLoop() + return writer, nil +} + +func (w *asyncUdsWriter) sendLoop() { + for { + select { + case datagram := <-w.datagramQueue: + w.write(datagram) + case <-w.stopChan: + return + } + } +} + +// SetWriteTimeout allows the user to set a custom write timeout +func (w *asyncUdsWriter) SetWriteTimeout(d time.Duration) error { + w.writeTimeout = d + return nil +} + +// Write data to the UDS connection with write timeout and minimal error handling: +// create the connection if nil, and destroy it if the statsd server has disconnected +func (w *asyncUdsWriter) Write(data []byte) (int, error) { + select { + case w.datagramQueue <- data: + return len(data), nil + default: + return 0, fmt.Errorf("uds datagram queue is full (the agent might not be able to keep up)") + } +} + +// write writes the given data to the UDS. +// This function is **not** thread safe. 
+func (w *asyncUdsWriter) write(data []byte) (int, error) { + conn, err := w.ensureConnection() + if err != nil { + return 0, err + } + + conn.SetWriteDeadline(time.Now().Add(w.writeTimeout)) + n, err := conn.Write(data) + + if e, isNetworkErr := err.(net.Error); !isNetworkErr || !e.Temporary() { + // err is not temporary, Statsd server disconnected, retry connecting at next packet + w.unsetConnection() + return 0, e + } + + return n, err +} + +func (w *asyncUdsWriter) Close() error { + close(w.stopChan) + if w.conn != nil { + return w.conn.Close() + } + return nil +} + +func (w *asyncUdsWriter) ensureConnection() (net.Conn, error) { + if w.conn != nil { + return w.conn, nil + } + + newConn, err := net.Dial(w.addr.Network(), w.addr.String()) + if err != nil { + return nil, err + } + w.conn = newConn + return newConn, nil +} + +func (w *asyncUdsWriter) unsetConnection() { + w.conn = nil +} diff --git a/vendor/github.com/DataDog/datadog-go/statsd/uds_blocking.go b/vendor/github.com/DataDog/datadog-go/statsd/uds_blocking.go new file mode 100644 index 00000000000..70ee99ab31a --- /dev/null +++ b/vendor/github.com/DataDog/datadog-go/statsd/uds_blocking.go @@ -0,0 +1,92 @@ +package statsd + +import ( + "net" + "sync" + "time" +) + +// blockingUdsWriter is an internal class wrapping around management of UDS connection +type blockingUdsWriter struct { + // Address to send metrics to, needed to allow reconnection on error + addr net.Addr + // Established connection object, or nil if not connected yet + conn net.Conn + // write timeout + writeTimeout time.Duration + sync.RWMutex // used to lock conn / writer can replace it +} + +// New returns a pointer to a new blockingUdsWriter given a socket file path as addr. 
+func newBlockingUdsWriter(addr string) (*blockingUdsWriter, error) { + udsAddr, err := net.ResolveUnixAddr("unixgram", addr) + if err != nil { + return nil, err + } + // Defer connection to first Write + writer := &blockingUdsWriter{addr: udsAddr, conn: nil, writeTimeout: defaultUDSTimeout} + return writer, nil +} + +// SetWriteTimeout allows the user to set a custom write timeout +func (w *blockingUdsWriter) SetWriteTimeout(d time.Duration) error { + w.writeTimeout = d + return nil +} + +// Write data to the UDS connection with write timeout and minimal error handling: +// create the connection if nil, and destroy it if the statsd server has disconnected +func (w *blockingUdsWriter) Write(data []byte) (int, error) { + conn, err := w.ensureConnection() + if err != nil { + return 0, err + } + + conn.SetWriteDeadline(time.Now().Add(w.writeTimeout)) + n, e := conn.Write(data) + + if err, isNetworkErr := e.(net.Error); !isNetworkErr || !err.Temporary() { + // Statsd server disconnected, retry connecting at next packet + w.unsetConnection() + return 0, e + } + return n, e +} + +func (w *blockingUdsWriter) Close() error { + if w.conn != nil { + return w.conn.Close() + } + return nil +} + +func (w *blockingUdsWriter) ensureConnection() (net.Conn, error) { + // Check if we've already got a socket we can use + w.RLock() + currentConn := w.conn + w.RUnlock() + + if currentConn != nil { + return currentConn, nil + } + + // Looks like we might need to connect - try again with write locking. 
+ w.Lock() + defer w.Unlock() + if w.conn != nil { + return w.conn, nil + } + + newConn, err := net.Dial(w.addr.Network(), w.addr.String()) + if err != nil { + return nil, err + } + w.conn = newConn + return newConn, nil +} + +func (w *blockingUdsWriter) unsetConnection() { + w.Lock() + defer w.Unlock() + w.conn = nil +} diff --git a/vendor/github.com/DataDog/zstd/.travis.yml b/vendor/github.com/DataDog/zstd/.travis.yml new file mode 100644 index 00000000000..c5aa33dc3c2 --- /dev/null +++ b/vendor/github.com/DataDog/zstd/.travis.yml @@ -0,0 +1,30 @@ +language: go + +go: + - 1.9.x + - 1.10.x + - 1.11.x + +os: + - linux + - osx + +matrix: + include: + name: "Go 1.11.x CentOS 32bits" + language: go + go: 1.11.x + os: linux + services: + - docker + script: + # Please update Go version in travis_test_32 as needed + - "docker run -i -v \"${PWD}:/zstd\" toopher/centos-i386:centos6 /bin/bash -c \"linux32 --32bit i386 /zstd/travis_test_32.sh\"" + +install: + - "wget https://github.com/DataDog/zstd/files/2246767/mr.zip" + - "unzip mr.zip" +script: + - "go build" + - "PAYLOAD=`pwd`/mr go test -v" + - "PAYLOAD=`pwd`/mr go test -bench ." diff --git a/vendor/github.com/DataDog/zstd/LICENSE b/vendor/github.com/DataDog/zstd/LICENSE new file mode 100644 index 00000000000..345c1eb932f --- /dev/null +++ b/vendor/github.com/DataDog/zstd/LICENSE @@ -0,0 +1,27 @@ +Simplified BSD License + +Copyright (c) 2016, Datadog +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ * Neither the name of the copyright holder nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/DataDog/zstd/README.md b/vendor/github.com/DataDog/zstd/README.md new file mode 100644 index 00000000000..6c02e168a0c --- /dev/null +++ b/vendor/github.com/DataDog/zstd/README.md @@ -0,0 +1,120 @@ +# Zstd Go Wrapper + +[C Zstd Homepage](https://github.com/Cyan4973/zstd) + +The current headers and C files are from *v1.3.4* (Commit +[2555975](https://github.com/facebook/zstd/releases/tag/v1.3.4)). + +## Usage + +There are two main APIs: + +* simple Compress/Decompress +* streaming API (io.Reader/io.Writer) + +The compress/decompress APIs mirror that of lz4, while the streaming API was +designed to be a drop-in replacement for zlib. + +### Simple `Compress/Decompress` + + +```go +// Compress compresses the byte array given in src and writes it to dst. +// If you already have a buffer allocated, you can pass it to prevent allocation +// If not, you can pass nil as dst. 
+// If the buffer is too small, it will be reallocated, resized, and returned bu the function +// If dst is nil, this will allocate the worst case size (CompressBound(src)) +Compress(dst, src []byte) ([]byte, error) +``` + +```go +// CompressLevel is the same as Compress but you can pass another compression level +CompressLevel(dst, src []byte, level int) ([]byte, error) +``` + +```go +// Decompress will decompress your payload into dst. +// If you already have a buffer allocated, you can pass it to prevent allocation +// If not, you can pass nil as dst (allocates a 4*src size as default). +// If the buffer is too small, it will retry 3 times by doubling the dst size +// After max retries, it will switch to the slower stream API to be sure to be able +// to decompress. Currently switches if compression ratio > 4*2**3=32. +Decompress(dst, src []byte) ([]byte, error) +``` + +### Stream API + +```go +// NewWriter creates a new object that can optionally be initialized with +// a precomputed dictionary. If dict is nil, compress without a dictionary. +// The dictionary array should not be changed during the use of this object. +// You MUST CALL Close() to write the last bytes of a zstd stream and free C objects. +NewWriter(w io.Writer) *Writer +NewWriterLevel(w io.Writer, level int) *Writer +NewWriterLevelDict(w io.Writer, level int, dict []byte) *Writer + +// Write compresses the input data and write it to the underlying writer +(w *Writer) Write(p []byte) (int, error) + +// Close flushes the buffer and frees C zstd objects +(w *Writer) Close() error +``` + +```go +// NewReader returns a new io.ReadCloser that will decompress data from the +// underlying reader. If a dictionary is provided to NewReaderDict, it must +// not be modified until Close is called. It is the caller's responsibility +// to call Close, which frees up C objects. 
+NewReader(r io.Reader) io.ReadCloser +NewReaderDict(r io.Reader, dict []byte) io.ReadCloser +``` + +### Benchmarks (benchmarked with v0.5.0) + +The author of Zstd also wrote lz4. Zstd is intended to occupy a speed/ratio +level similar to what zlib currently provides. In our tests, the can always +be made to be better than zlib by chosing an appropriate level while still +keeping compression and decompression time faster than zlib. + +You can run the benchmarks against your own payloads by using the Go benchmarks tool. +Just export your payload filepath as the `PAYLOAD` environment variable and run the benchmarks: + +```go +go test -bench . +``` + +Compression of a 7Mb pdf zstd (this wrapper) vs [czlib](https://github.com/DataDog/czlib): +``` +BenchmarkCompression 5 221056624 ns/op 67.34 MB/s +BenchmarkDecompression 100 18370416 ns/op 810.32 MB/s + +BenchmarkFzlibCompress 2 610156603 ns/op 24.40 MB/s +BenchmarkFzlibDecompress 20 81195246 ns/op 183.33 MB/s +``` + +Ratio is also better by a margin of ~20%. +Compression speed is always better than zlib on all the payloads we tested; +However, [czlib](https://github.com/DataDog/czlib) has optimisations that make it +faster at decompressiong small payloads: + +``` +Testing with size: 11... czlib: 8.97 MB/s, zstd: 3.26 MB/s +Testing with size: 27... czlib: 23.3 MB/s, zstd: 8.22 MB/s +Testing with size: 62... czlib: 31.6 MB/s, zstd: 19.49 MB/s +Testing with size: 141... czlib: 74.54 MB/s, zstd: 42.55 MB/s +Testing with size: 323... czlib: 155.14 MB/s, zstd: 99.39 MB/s +Testing with size: 739... czlib: 235.9 MB/s, zstd: 216.45 MB/s +Testing with size: 1689... czlib: 116.45 MB/s, zstd: 345.64 MB/s +Testing with size: 3858... czlib: 176.39 MB/s, zstd: 617.56 MB/s +Testing with size: 8811... czlib: 254.11 MB/s, zstd: 824.34 MB/s +Testing with size: 20121... czlib: 197.43 MB/s, zstd: 1339.11 MB/s +Testing with size: 45951... 
czlib: 201.62 MB/s, zstd: 1951.57 MB/s +``` + +zstd starts to shine with payloads > 1KB + +### Stability - Current state: STABLE + +The C library seems to be pretty stable and according to the author has been tested and fuzzed. + +For the Go wrapper, the test cover most usual cases and we have succesfully tested it on all staging and prod data. diff --git a/vendor/github.com/DataDog/zstd/ZSTD_LICENSE b/vendor/github.com/DataDog/zstd/ZSTD_LICENSE new file mode 100644 index 00000000000..a793a802892 --- /dev/null +++ b/vendor/github.com/DataDog/zstd/ZSTD_LICENSE @@ -0,0 +1,30 @@ +BSD License + +For Zstandard software + +Copyright (c) 2016-present, Facebook, Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + * Neither the name Facebook nor the names of its contributors may be used to + endorse or promote products derived from this software without specific + prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/DataDog/zstd/bitstream.h b/vendor/github.com/DataDog/zstd/bitstream.h new file mode 100644 index 00000000000..f7f389fe0fa --- /dev/null +++ b/vendor/github.com/DataDog/zstd/bitstream.h @@ -0,0 +1,471 @@ +/* ****************************************************************** + bitstream + Part of FSE library + header file (to include) + Copyright (C) 2013-2017, Yann Collet. + + BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + You can contact the author at : + - Source repository : https://github.com/Cyan4973/FiniteStateEntropy +****************************************************************** */ +#ifndef BITSTREAM_H_MODULE +#define BITSTREAM_H_MODULE + +#if defined (__cplusplus) +extern "C" { +#endif + +/* +* This API consists of small unitary functions, which must be inlined for best performance. +* Since link-time-optimization is not available for all compilers, +* these functions are defined into a .h to be included. +*/ + +/*-**************************************** +* Dependencies +******************************************/ +#include "mem.h" /* unaligned access routines */ +#include "error_private.h" /* error codes and messages */ + + +/*-************************************* +* Debug +***************************************/ +#if defined(BIT_DEBUG) && (BIT_DEBUG>=1) +# include +#else +# ifndef assert +# define assert(condition) ((void)0) +# endif +#endif + + +/*========================================= +* Target specific +=========================================*/ +#if defined(__BMI__) && defined(__GNUC__) +# include /* support for bextr (experimental) */ +#endif + +#define STREAM_ACCUMULATOR_MIN_32 25 +#define STREAM_ACCUMULATOR_MIN_64 57 +#define STREAM_ACCUMULATOR_MIN ((U32)(MEM_32bits() ? 
STREAM_ACCUMULATOR_MIN_32 : STREAM_ACCUMULATOR_MIN_64)) + + +/*-****************************************** +* bitStream encoding API (write forward) +********************************************/ +/* bitStream can mix input from multiple sources. + * A critical property of these streams is that they encode and decode in **reverse** direction. + * So the first bit sequence you add will be the last to be read, like a LIFO stack. + */ +typedef struct +{ + size_t bitContainer; + unsigned bitPos; + char* startPtr; + char* ptr; + char* endPtr; +} BIT_CStream_t; + +MEM_STATIC size_t BIT_initCStream(BIT_CStream_t* bitC, void* dstBuffer, size_t dstCapacity); +MEM_STATIC void BIT_addBits(BIT_CStream_t* bitC, size_t value, unsigned nbBits); +MEM_STATIC void BIT_flushBits(BIT_CStream_t* bitC); +MEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC); + +/* Start with initCStream, providing the size of buffer to write into. +* bitStream will never write outside of this buffer. +* `dstCapacity` must be >= sizeof(bitD->bitContainer), otherwise @return will be an error code. +* +* bits are first added to a local register. +* Local register is size_t, hence 64-bits on 64-bits systems, or 32-bits on 32-bits systems. +* Writing data into memory is an explicit operation, performed by the flushBits function. +* Hence keep track how many bits are potentially stored into local register to avoid register overflow. +* After a flushBits, a maximum of 7 bits might still be stored into local register. +* +* Avoid storing elements of more than 24 bits if you want compatibility with 32-bits bitstream readers. +* +* Last operation is to close the bitStream. +* The function returns the final size of CStream in bytes. 
+* If data couldn't fit into `dstBuffer`, it will return a 0 ( == not storable) +*/ + + +/*-******************************************** +* bitStream decoding API (read backward) +**********************************************/ +typedef struct +{ + size_t bitContainer; + unsigned bitsConsumed; + const char* ptr; + const char* start; + const char* limitPtr; +} BIT_DStream_t; + +typedef enum { BIT_DStream_unfinished = 0, + BIT_DStream_endOfBuffer = 1, + BIT_DStream_completed = 2, + BIT_DStream_overflow = 3 } BIT_DStream_status; /* result of BIT_reloadDStream() */ + /* 1,2,4,8 would be better for bitmap combinations, but slows down performance a bit ... :( */ + +MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize); +MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits); +MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD); +MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* bitD); + + +/* Start by invoking BIT_initDStream(). +* A chunk of the bitStream is then stored into a local register. +* Local register size is 64-bits on 64-bits systems, 32-bits on 32-bits systems (size_t). +* You can then retrieve bitFields stored into the local register, **in reverse order**. +* Local register is explicitly reloaded from memory by the BIT_reloadDStream() method. +* A reload guarantee a minimum of ((8*sizeof(bitD->bitContainer))-7) bits when its result is BIT_DStream_unfinished. +* Otherwise, it can be less than that, so proceed accordingly. +* Checking if DStream has reached its end can be performed with BIT_endOfDStream(). 
+*/ + + +/*-**************************************** +* unsafe API +******************************************/ +MEM_STATIC void BIT_addBitsFast(BIT_CStream_t* bitC, size_t value, unsigned nbBits); +/* faster, but works only if value is "clean", meaning all high bits above nbBits are 0 */ + +MEM_STATIC void BIT_flushBitsFast(BIT_CStream_t* bitC); +/* unsafe version; does not check buffer overflow */ + +MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits); +/* faster, but works only if nbBits >= 1 */ + + + +/*-************************************************************** +* Internal functions +****************************************************************/ +MEM_STATIC unsigned BIT_highbit32 (U32 val) +{ + assert(val != 0); + { +# if defined(_MSC_VER) /* Visual */ + unsigned long r=0; + _BitScanReverse ( &r, val ); + return (unsigned) r; +# elif defined(__GNUC__) && (__GNUC__ >= 3) /* Use GCC Intrinsic */ + return 31 - __builtin_clz (val); +# else /* Software version */ + static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, + 11, 14, 16, 18, 22, 25, 3, 30, + 8, 12, 20, 28, 15, 17, 24, 7, + 19, 27, 23, 6, 26, 5, 4, 31 }; + U32 v = val; + v |= v >> 1; + v |= v >> 2; + v |= v >> 4; + v |= v >> 8; + v |= v >> 16; + return DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27]; +# endif + } +} + +/*===== Local Constants =====*/ +static const unsigned BIT_mask[] = { + 0, 1, 3, 7, 0xF, 0x1F, + 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, + 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0x1FFFF, + 0x3FFFF, 0x7FFFF, 0xFFFFF, 0x1FFFFF, 0x3FFFFF, 0x7FFFFF, + 0xFFFFFF, 0x1FFFFFF, 0x3FFFFFF, 0x7FFFFFF, 0xFFFFFFF, 0x1FFFFFFF, + 0x3FFFFFFF, 0x7FFFFFFF}; /* up to 31 bits */ +#define BIT_MASK_SIZE (sizeof(BIT_mask) / sizeof(BIT_mask[0])) + +/*-************************************************************** +* bitStream encoding +****************************************************************/ +/*! 
BIT_initCStream() : + * `dstCapacity` must be > sizeof(size_t) + * @return : 0 if success, + * otherwise an error code (can be tested using ERR_isError()) */ +MEM_STATIC size_t BIT_initCStream(BIT_CStream_t* bitC, + void* startPtr, size_t dstCapacity) +{ + bitC->bitContainer = 0; + bitC->bitPos = 0; + bitC->startPtr = (char*)startPtr; + bitC->ptr = bitC->startPtr; + bitC->endPtr = bitC->startPtr + dstCapacity - sizeof(bitC->bitContainer); + if (dstCapacity <= sizeof(bitC->bitContainer)) return ERROR(dstSize_tooSmall); + return 0; +} + +/*! BIT_addBits() : + * can add up to 31 bits into `bitC`. + * Note : does not check for register overflow ! */ +MEM_STATIC void BIT_addBits(BIT_CStream_t* bitC, + size_t value, unsigned nbBits) +{ + MEM_STATIC_ASSERT(BIT_MASK_SIZE == 32); + assert(nbBits < BIT_MASK_SIZE); + assert(nbBits + bitC->bitPos < sizeof(bitC->bitContainer) * 8); + bitC->bitContainer |= (value & BIT_mask[nbBits]) << bitC->bitPos; + bitC->bitPos += nbBits; +} + +/*! BIT_addBitsFast() : + * works only if `value` is _clean_, meaning all high bits above nbBits are 0 */ +MEM_STATIC void BIT_addBitsFast(BIT_CStream_t* bitC, + size_t value, unsigned nbBits) +{ + assert((value>>nbBits) == 0); + assert(nbBits + bitC->bitPos < sizeof(bitC->bitContainer) * 8); + bitC->bitContainer |= value << bitC->bitPos; + bitC->bitPos += nbBits; +} + +/*! BIT_flushBitsFast() : + * assumption : bitContainer has not overflowed + * unsafe version; does not check buffer overflow */ +MEM_STATIC void BIT_flushBitsFast(BIT_CStream_t* bitC) +{ + size_t const nbBytes = bitC->bitPos >> 3; + assert(bitC->bitPos < sizeof(bitC->bitContainer) * 8); + MEM_writeLEST(bitC->ptr, bitC->bitContainer); + bitC->ptr += nbBytes; + assert(bitC->ptr <= bitC->endPtr); + bitC->bitPos &= 7; + bitC->bitContainer >>= nbBytes*8; +} + +/*! BIT_flushBits() : + * assumption : bitContainer has not overflowed + * safe version; check for buffer overflow, and prevents it. + * note : does not signal buffer overflow. 
+ * overflow will be revealed later on using BIT_closeCStream() */ +MEM_STATIC void BIT_flushBits(BIT_CStream_t* bitC) +{ + size_t const nbBytes = bitC->bitPos >> 3; + assert(bitC->bitPos < sizeof(bitC->bitContainer) * 8); + MEM_writeLEST(bitC->ptr, bitC->bitContainer); + bitC->ptr += nbBytes; + if (bitC->ptr > bitC->endPtr) bitC->ptr = bitC->endPtr; + bitC->bitPos &= 7; + bitC->bitContainer >>= nbBytes*8; +} + +/*! BIT_closeCStream() : + * @return : size of CStream, in bytes, + * or 0 if it could not fit into dstBuffer */ +MEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC) +{ + BIT_addBitsFast(bitC, 1, 1); /* endMark */ + BIT_flushBits(bitC); + if (bitC->ptr >= bitC->endPtr) return 0; /* overflow detected */ + return (bitC->ptr - bitC->startPtr) + (bitC->bitPos > 0); +} + + +/*-******************************************************** +* bitStream decoding +**********************************************************/ +/*! BIT_initDStream() : + * Initialize a BIT_DStream_t. + * `bitD` : a pointer to an already allocated BIT_DStream_t structure. + * `srcSize` must be the *exact* size of the bitStream, in bytes. + * @return : size of stream (== srcSize), or an errorCode if a problem is detected + */ +MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize) +{ + if (srcSize < 1) { memset(bitD, 0, sizeof(*bitD)); return ERROR(srcSize_wrong); } + + bitD->start = (const char*)srcBuffer; + bitD->limitPtr = bitD->start + sizeof(bitD->bitContainer); + + if (srcSize >= sizeof(bitD->bitContainer)) { /* normal case */ + bitD->ptr = (const char*)srcBuffer + srcSize - sizeof(bitD->bitContainer); + bitD->bitContainer = MEM_readLEST(bitD->ptr); + { BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1]; + bitD->bitsConsumed = lastByte ? 
8 - BIT_highbit32(lastByte) : 0; /* ensures bitsConsumed is always set */ + if (lastByte == 0) return ERROR(GENERIC); /* endMark not present */ } + } else { + bitD->ptr = bitD->start; + bitD->bitContainer = *(const BYTE*)(bitD->start); + switch(srcSize) + { + case 7: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[6]) << (sizeof(bitD->bitContainer)*8 - 16); + /* fall-through */ + + case 6: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[5]) << (sizeof(bitD->bitContainer)*8 - 24); + /* fall-through */ + + case 5: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[4]) << (sizeof(bitD->bitContainer)*8 - 32); + /* fall-through */ + + case 4: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[3]) << 24; + /* fall-through */ + + case 3: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[2]) << 16; + /* fall-through */ + + case 2: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[1]) << 8; + /* fall-through */ + + default: break; + } + { BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1]; + bitD->bitsConsumed = lastByte ? 
8 - BIT_highbit32(lastByte) : 0; + if (lastByte == 0) return ERROR(corruption_detected); /* endMark not present */ + } + bitD->bitsConsumed += (U32)(sizeof(bitD->bitContainer) - srcSize)*8; + } + + return srcSize; +} + +MEM_STATIC size_t BIT_getUpperBits(size_t bitContainer, U32 const start) +{ + return bitContainer >> start; +} + +MEM_STATIC size_t BIT_getMiddleBits(size_t bitContainer, U32 const start, U32 const nbBits) +{ +#if defined(__BMI__) && defined(__GNUC__) && __GNUC__*1000+__GNUC_MINOR__ >= 4008 /* experimental */ +# if defined(__x86_64__) + if (sizeof(bitContainer)==8) + return _bextr_u64(bitContainer, start, nbBits); + else +# endif + return _bextr_u32(bitContainer, start, nbBits); +#else + assert(nbBits < BIT_MASK_SIZE); + return (bitContainer >> start) & BIT_mask[nbBits]; +#endif +} + +MEM_STATIC size_t BIT_getLowerBits(size_t bitContainer, U32 const nbBits) +{ + assert(nbBits < BIT_MASK_SIZE); + return bitContainer & BIT_mask[nbBits]; +} + +/*! BIT_lookBits() : + * Provides next n bits from local register. + * local register is not modified. + * On 32-bits, maxNbBits==24. + * On 64-bits, maxNbBits==56. + * @return : value extracted */ +MEM_STATIC size_t BIT_lookBits(const BIT_DStream_t* bitD, U32 nbBits) +{ +#if defined(__BMI__) && defined(__GNUC__) /* experimental; fails if bitD->bitsConsumed + nbBits > sizeof(bitD->bitContainer)*8 */ + return BIT_getMiddleBits(bitD->bitContainer, (sizeof(bitD->bitContainer)*8) - bitD->bitsConsumed - nbBits, nbBits); +#else + U32 const regMask = sizeof(bitD->bitContainer)*8 - 1; + return ((bitD->bitContainer << (bitD->bitsConsumed & regMask)) >> 1) >> ((regMask-nbBits) & regMask); +#endif +} + +/*! 
BIT_lookBitsFast() : + * unsafe version; only works if nbBits >= 1 */ +MEM_STATIC size_t BIT_lookBitsFast(const BIT_DStream_t* bitD, U32 nbBits) +{ + U32 const regMask = sizeof(bitD->bitContainer)*8 - 1; + assert(nbBits >= 1); + return (bitD->bitContainer << (bitD->bitsConsumed & regMask)) >> (((regMask+1)-nbBits) & regMask); +} + +MEM_STATIC void BIT_skipBits(BIT_DStream_t* bitD, U32 nbBits) +{ + bitD->bitsConsumed += nbBits; +} + +/*! BIT_readBits() : + * Read (consume) next n bits from local register and update. + * Pay attention to not read more than nbBits contained into local register. + * @return : extracted value. */ +MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, U32 nbBits) +{ + size_t const value = BIT_lookBits(bitD, nbBits); + BIT_skipBits(bitD, nbBits); + return value; +} + +/*! BIT_readBitsFast() : + * unsafe version; only works only if nbBits >= 1 */ +MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, U32 nbBits) +{ + size_t const value = BIT_lookBitsFast(bitD, nbBits); + assert(nbBits >= 1); + BIT_skipBits(bitD, nbBits); + return value; +} + +/*! BIT_reloadDStream() : + * Refill `bitD` from buffer previously set in BIT_initDStream() . + * This function is safe, it guarantees it will not read beyond src buffer. + * @return : status of `BIT_DStream_t` internal register. 
+ * when status == BIT_DStream_unfinished, internal register is filled with at least 25 or 57 bits */ +MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD) +{ + if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8)) /* overflow detected, like end of stream */ + return BIT_DStream_overflow; + + if (bitD->ptr >= bitD->limitPtr) { + bitD->ptr -= bitD->bitsConsumed >> 3; + bitD->bitsConsumed &= 7; + bitD->bitContainer = MEM_readLEST(bitD->ptr); + return BIT_DStream_unfinished; + } + if (bitD->ptr == bitD->start) { + if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return BIT_DStream_endOfBuffer; + return BIT_DStream_completed; + } + /* start < ptr < limitPtr */ + { U32 nbBytes = bitD->bitsConsumed >> 3; + BIT_DStream_status result = BIT_DStream_unfinished; + if (bitD->ptr - nbBytes < bitD->start) { + nbBytes = (U32)(bitD->ptr - bitD->start); /* ptr > start */ + result = BIT_DStream_endOfBuffer; + } + bitD->ptr -= nbBytes; + bitD->bitsConsumed -= nbBytes*8; + bitD->bitContainer = MEM_readLEST(bitD->ptr); /* reminder : srcSize > sizeof(bitD->bitContainer), otherwise bitD->ptr == bitD->start */ + return result; + } +} + +/*! BIT_endOfDStream() : + * @return : 1 if DStream has _exactly_ reached its end (all bits consumed). + */ +MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* DStream) +{ + return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer)*8)); +} + +#if defined (__cplusplus) +} +#endif + +#endif /* BITSTREAM_H_MODULE */ diff --git a/vendor/github.com/DataDog/zstd/compiler.h b/vendor/github.com/DataDog/zstd/compiler.h new file mode 100644 index 00000000000..e90a3bcde36 --- /dev/null +++ b/vendor/github.com/DataDog/zstd/compiler.h @@ -0,0 +1,111 @@ +/* + * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * All rights reserved. 
+ * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + +#ifndef ZSTD_COMPILER_H +#define ZSTD_COMPILER_H + +/*-******************************************************* +* Compiler specifics +*********************************************************/ +/* force inlining */ +#if defined (__GNUC__) || defined(__cplusplus) || defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */ +# define INLINE_KEYWORD inline +#else +# define INLINE_KEYWORD +#endif + +#if defined(__GNUC__) +# define FORCE_INLINE_ATTR __attribute__((always_inline)) +#elif defined(_MSC_VER) +# define FORCE_INLINE_ATTR __forceinline +#else +# define FORCE_INLINE_ATTR +#endif + +/** + * FORCE_INLINE_TEMPLATE is used to define C "templates", which take constant + * parameters. They must be inlined for the compiler to elimininate the constant + * branches. + */ +#define FORCE_INLINE_TEMPLATE static INLINE_KEYWORD FORCE_INLINE_ATTR +/** + * HINT_INLINE is used to help the compiler generate better code. It is *not* + * used for "templates", so it can be tweaked based on the compilers + * performance. + * + * gcc-4.8 and gcc-4.9 have been shown to benefit from leaving off the + * always_inline attribute. + * + * clang up to 5.0.0 (trunk) benefit tremendously from the always_inline + * attribute. 
+ */ +#if !defined(__clang__) && defined(__GNUC__) && __GNUC__ >= 4 && __GNUC_MINOR__ >= 8 && __GNUC__ < 5 +# define HINT_INLINE static INLINE_KEYWORD +#else +# define HINT_INLINE static INLINE_KEYWORD FORCE_INLINE_ATTR +#endif + +/* force no inlining */ +#ifdef _MSC_VER +# define FORCE_NOINLINE static __declspec(noinline) +#else +# ifdef __GNUC__ +# define FORCE_NOINLINE static __attribute__((__noinline__)) +# else +# define FORCE_NOINLINE static +# endif +#endif + +/* target attribute */ +#ifndef __has_attribute + #define __has_attribute(x) 0 /* Compatibility with non-clang compilers. */ +#endif +#if defined(__GNUC__) +# define TARGET_ATTRIBUTE(target) __attribute__((__target__(target))) +#else +# define TARGET_ATTRIBUTE(target) +#endif + +/* Enable runtime BMI2 dispatch based on the CPU. + * Enabled for clang & gcc >=4.8 on x86 when BMI2 isn't enabled by default. + */ +#ifndef DYNAMIC_BMI2 + #if (defined(__clang__) && __has_attribute(__target__)) \ + || (defined(__GNUC__) \ + && (__GNUC__ >= 5 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8))) \ + && (defined(__x86_64__) || defined(_M_X86)) \ + && !defined(__BMI2__) + # define DYNAMIC_BMI2 1 + #else + # define DYNAMIC_BMI2 0 + #endif +#endif + +/* prefetch */ +#if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_I86)) /* _mm_prefetch() is not defined outside of x86/x64 */ +# include /* https://msdn.microsoft.com/fr-fr/library/84szxsww(v=vs.90).aspx */ +# define PREFETCH(ptr) _mm_prefetch((const char*)ptr, _MM_HINT_T0) +#elif defined(__GNUC__) +# define PREFETCH(ptr) __builtin_prefetch(ptr, 0, 0) +#else +# define PREFETCH(ptr) /* disabled */ +#endif + +/* disable warnings */ +#ifdef _MSC_VER /* Visual Studio */ +# include /* For Visual 2005 */ +# pragma warning(disable : 4100) /* disable: C4100: unreferenced formal parameter */ +# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ +# pragma warning(disable : 4204) /* disable: C4204: non-constant aggregate initializer */ +# pragma 
warning(disable : 4214) /* disable: C4214: non-int bitfields */ +# pragma warning(disable : 4324) /* disable: C4324: padded structure */ +#endif + +#endif /* ZSTD_COMPILER_H */ diff --git a/vendor/github.com/DataDog/zstd/cover.c b/vendor/github.com/DataDog/zstd/cover.c new file mode 100644 index 00000000000..b5a3957a9b9 --- /dev/null +++ b/vendor/github.com/DataDog/zstd/cover.c @@ -0,0 +1,1048 @@ +/* + * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + +/* ***************************************************************************** + * Constructs a dictionary using a heuristic based on the following paper: + * + * Liao, Petri, Moffat, Wirth + * Effective Construction of Relative Lempel-Ziv Dictionaries + * Published in WWW 2016. + * + * Adapted from code originally written by @ot (Giuseppe Ottaviano). + ******************************************************************************/ + +/*-************************************* +* Dependencies +***************************************/ +#include /* fprintf */ +#include /* malloc, free, qsort */ +#include /* memset */ +#include /* clock */ + +#include "mem.h" /* read */ +#include "pool.h" +#include "threading.h" +#include "zstd_internal.h" /* includes zstd.h */ +#ifndef ZDICT_STATIC_LINKING_ONLY +#define ZDICT_STATIC_LINKING_ONLY +#endif +#include "zdict.h" + +/*-************************************* +* Constants +***************************************/ +#define COVER_MAX_SAMPLES_SIZE (sizeof(size_t) == 8 ? 
((U32)-1) : ((U32)1 GB)) + +/*-************************************* +* Console display +***************************************/ +static int g_displayLevel = 2; +#define DISPLAY(...) \ + { \ + fprintf(stderr, __VA_ARGS__); \ + fflush(stderr); \ + } +#define LOCALDISPLAYLEVEL(displayLevel, l, ...) \ + if (displayLevel >= l) { \ + DISPLAY(__VA_ARGS__); \ + } /* 0 : no display; 1: errors; 2: default; 3: details; 4: debug */ +#define DISPLAYLEVEL(l, ...) LOCALDISPLAYLEVEL(g_displayLevel, l, __VA_ARGS__) + +#define LOCALDISPLAYUPDATE(displayLevel, l, ...) \ + if (displayLevel >= l) { \ + if ((clock() - g_time > refreshRate) || (displayLevel >= 4)) { \ + g_time = clock(); \ + DISPLAY(__VA_ARGS__); \ + } \ + } +#define DISPLAYUPDATE(l, ...) LOCALDISPLAYUPDATE(g_displayLevel, l, __VA_ARGS__) +static const clock_t refreshRate = CLOCKS_PER_SEC * 15 / 100; +static clock_t g_time = 0; + +/*-************************************* +* Hash table +*************************************** +* A small specialized hash map for storing activeDmers. +* The map does not resize, so if it becomes full it will loop forever. +* Thus, the map must be large enough to store every value. +* The map implements linear probing and keeps its load less than 0.5. +*/ + +#define MAP_EMPTY_VALUE ((U32)-1) +typedef struct COVER_map_pair_t_s { + U32 key; + U32 value; +} COVER_map_pair_t; + +typedef struct COVER_map_s { + COVER_map_pair_t *data; + U32 sizeLog; + U32 size; + U32 sizeMask; +} COVER_map_t; + +/** + * Clear the map. + */ +static void COVER_map_clear(COVER_map_t *map) { + memset(map->data, MAP_EMPTY_VALUE, map->size * sizeof(COVER_map_pair_t)); +} + +/** + * Initializes a map of the given size. + * Returns 1 on success and 0 on failure. + * The map must be destroyed with COVER_map_destroy(). + * The map is only guaranteed to be large enough to hold size elements. 
+ */ +static int COVER_map_init(COVER_map_t *map, U32 size) { + map->sizeLog = ZSTD_highbit32(size) + 2; + map->size = (U32)1 << map->sizeLog; + map->sizeMask = map->size - 1; + map->data = (COVER_map_pair_t *)malloc(map->size * sizeof(COVER_map_pair_t)); + if (!map->data) { + map->sizeLog = 0; + map->size = 0; + return 0; + } + COVER_map_clear(map); + return 1; +} + +/** + * Internal hash function + */ +static const U32 prime4bytes = 2654435761U; +static U32 COVER_map_hash(COVER_map_t *map, U32 key) { + return (key * prime4bytes) >> (32 - map->sizeLog); +} + +/** + * Helper function that returns the index that a key should be placed into. + */ +static U32 COVER_map_index(COVER_map_t *map, U32 key) { + const U32 hash = COVER_map_hash(map, key); + U32 i; + for (i = hash;; i = (i + 1) & map->sizeMask) { + COVER_map_pair_t *pos = &map->data[i]; + if (pos->value == MAP_EMPTY_VALUE) { + return i; + } + if (pos->key == key) { + return i; + } + } +} + +/** + * Returns the pointer to the value for key. + * If key is not in the map, it is inserted and the value is set to 0. + * The map must not be full. + */ +static U32 *COVER_map_at(COVER_map_t *map, U32 key) { + COVER_map_pair_t *pos = &map->data[COVER_map_index(map, key)]; + if (pos->value == MAP_EMPTY_VALUE) { + pos->key = key; + pos->value = 0; + } + return &pos->value; +} + +/** + * Deletes key from the map if present. 
+ */ +static void COVER_map_remove(COVER_map_t *map, U32 key) { + U32 i = COVER_map_index(map, key); + COVER_map_pair_t *del = &map->data[i]; + U32 shift = 1; + if (del->value == MAP_EMPTY_VALUE) { + return; + } + for (i = (i + 1) & map->sizeMask;; i = (i + 1) & map->sizeMask) { + COVER_map_pair_t *const pos = &map->data[i]; + /* If the position is empty we are done */ + if (pos->value == MAP_EMPTY_VALUE) { + del->value = MAP_EMPTY_VALUE; + return; + } + /* If pos can be moved to del do so */ + if (((i - COVER_map_hash(map, pos->key)) & map->sizeMask) >= shift) { + del->key = pos->key; + del->value = pos->value; + del = pos; + shift = 1; + } else { + ++shift; + } + } +} + +/** + * Destroyes a map that is inited with COVER_map_init(). + */ +static void COVER_map_destroy(COVER_map_t *map) { + if (map->data) { + free(map->data); + } + map->data = NULL; + map->size = 0; +} + +/*-************************************* +* Context +***************************************/ + +typedef struct { + const BYTE *samples; + size_t *offsets; + const size_t *samplesSizes; + size_t nbSamples; + U32 *suffix; + size_t suffixSize; + U32 *freqs; + U32 *dmerAt; + unsigned d; +} COVER_ctx_t; + +/* We need a global context for qsort... */ +static COVER_ctx_t *g_ctx = NULL; + +/*-************************************* +* Helper functions +***************************************/ + +/** + * Returns the sum of the sample sizes. + */ +static size_t COVER_sum(const size_t *samplesSizes, unsigned nbSamples) { + size_t sum = 0; + size_t i; + for (i = 0; i < nbSamples; ++i) { + sum += samplesSizes[i]; + } + return sum; +} + +/** + * Returns -1 if the dmer at lp is less than the dmer at rp. + * Return 0 if the dmers at lp and rp are equal. + * Returns 1 if the dmer at lp is greater than the dmer at rp. 
+ */ +static int COVER_cmp(COVER_ctx_t *ctx, const void *lp, const void *rp) { + U32 const lhs = *(U32 const *)lp; + U32 const rhs = *(U32 const *)rp; + return memcmp(ctx->samples + lhs, ctx->samples + rhs, ctx->d); +} +/** + * Faster version for d <= 8. + */ +static int COVER_cmp8(COVER_ctx_t *ctx, const void *lp, const void *rp) { + U64 const mask = (ctx->d == 8) ? (U64)-1 : (((U64)1 << (8 * ctx->d)) - 1); + U64 const lhs = MEM_readLE64(ctx->samples + *(U32 const *)lp) & mask; + U64 const rhs = MEM_readLE64(ctx->samples + *(U32 const *)rp) & mask; + if (lhs < rhs) { + return -1; + } + return (lhs > rhs); +} + +/** + * Same as COVER_cmp() except ties are broken by pointer value + * NOTE: g_ctx must be set to call this function. A global is required because + * qsort doesn't take an opaque pointer. + */ +static int COVER_strict_cmp(const void *lp, const void *rp) { + int result = COVER_cmp(g_ctx, lp, rp); + if (result == 0) { + result = lp < rp ? -1 : 1; + } + return result; +} +/** + * Faster version for d <= 8. + */ +static int COVER_strict_cmp8(const void *lp, const void *rp) { + int result = COVER_cmp8(g_ctx, lp, rp); + if (result == 0) { + result = lp < rp ? -1 : 1; + } + return result; +} + +/** + * Returns the first pointer in [first, last) whose element does not compare + * less than value. If no such element exists it returns last. + */ +static const size_t *COVER_lower_bound(const size_t *first, const size_t *last, + size_t value) { + size_t count = last - first; + while (count != 0) { + size_t step = count / 2; + const size_t *ptr = first; + ptr += step; + if (*ptr < value) { + first = ++ptr; + count -= step + 1; + } else { + count = step; + } + } + return first; +} + +/** + * Generic groupBy function. + * Groups an array sorted by cmp into groups with equivalent values. + * Calls grp for each group. 
+ */ +static void +COVER_groupBy(const void *data, size_t count, size_t size, COVER_ctx_t *ctx, + int (*cmp)(COVER_ctx_t *, const void *, const void *), + void (*grp)(COVER_ctx_t *, const void *, const void *)) { + const BYTE *ptr = (const BYTE *)data; + size_t num = 0; + while (num < count) { + const BYTE *grpEnd = ptr + size; + ++num; + while (num < count && cmp(ctx, ptr, grpEnd) == 0) { + grpEnd += size; + ++num; + } + grp(ctx, ptr, grpEnd); + ptr = grpEnd; + } +} + +/*-************************************* +* Cover functions +***************************************/ + +/** + * Called on each group of positions with the same dmer. + * Counts the frequency of each dmer and saves it in the suffix array. + * Fills `ctx->dmerAt`. + */ +static void COVER_group(COVER_ctx_t *ctx, const void *group, + const void *groupEnd) { + /* The group consists of all the positions with the same first d bytes. */ + const U32 *grpPtr = (const U32 *)group; + const U32 *grpEnd = (const U32 *)groupEnd; + /* The dmerId is how we will reference this dmer. + * This allows us to map the whole dmer space to a much smaller space, the + * size of the suffix array. + */ + const U32 dmerId = (U32)(grpPtr - ctx->suffix); + /* Count the number of samples this dmer shows up in */ + U32 freq = 0; + /* Details */ + const size_t *curOffsetPtr = ctx->offsets; + const size_t *offsetsEnd = ctx->offsets + ctx->nbSamples; + /* Once *grpPtr >= curSampleEnd this occurrence of the dmer is in a + * different sample than the last. + */ + size_t curSampleEnd = ctx->offsets[0]; + for (; grpPtr != grpEnd; ++grpPtr) { + /* Save the dmerId for this position so we can get back to it. */ + ctx->dmerAt[*grpPtr] = dmerId; + /* Dictionaries only help for the first reference to the dmer. + * After that zstd can reference the match from the previous reference. + * So only count each dmer once for each sample it is in. 
+ */ + if (*grpPtr < curSampleEnd) { + continue; + } + freq += 1; + /* Binary search to find the end of the sample *grpPtr is in. + * In the common case that grpPtr + 1 == grpEnd we can skip the binary + * search because the loop is over. + */ + if (grpPtr + 1 != grpEnd) { + const size_t *sampleEndPtr = + COVER_lower_bound(curOffsetPtr, offsetsEnd, *grpPtr); + curSampleEnd = *sampleEndPtr; + curOffsetPtr = sampleEndPtr + 1; + } + } + /* At this point we are never going to look at this segment of the suffix + * array again. We take advantage of this fact to save memory. + * We store the frequency of the dmer in the first position of the group, + * which is dmerId. + */ + ctx->suffix[dmerId] = freq; +} + +/** + * A segment is a range in the source as well as the score of the segment. + */ +typedef struct { + U32 begin; + U32 end; + U32 score; +} COVER_segment_t; + +/** + * Selects the best segment in an epoch. + * Segments of are scored according to the function: + * + * Let F(d) be the frequency of dmer d. + * Let S_i be the dmer at position i of segment S which has length k. + * + * Score(S) = F(S_1) + F(S_2) + ... + F(S_{k-d+1}) + * + * Once the dmer d is in the dictionay we set F(d) = 0. + */ +static COVER_segment_t COVER_selectSegment(const COVER_ctx_t *ctx, U32 *freqs, + COVER_map_t *activeDmers, U32 begin, + U32 end, + ZDICT_cover_params_t parameters) { + /* Constants */ + const U32 k = parameters.k; + const U32 d = parameters.d; + const U32 dmersInK = k - d + 1; + /* Try each segment (activeSegment) and save the best (bestSegment) */ + COVER_segment_t bestSegment = {0, 0, 0}; + COVER_segment_t activeSegment; + /* Reset the activeDmers in the segment */ + COVER_map_clear(activeDmers); + /* The activeSegment starts at the beginning of the epoch. */ + activeSegment.begin = begin; + activeSegment.end = begin; + activeSegment.score = 0; + /* Slide the activeSegment through the whole epoch. + * Save the best segment in bestSegment. 
+ */ + while (activeSegment.end < end) { + /* The dmerId for the dmer at the next position */ + U32 newDmer = ctx->dmerAt[activeSegment.end]; + /* The entry in activeDmers for this dmerId */ + U32 *newDmerOcc = COVER_map_at(activeDmers, newDmer); + /* If the dmer isn't already present in the segment add its score. */ + if (*newDmerOcc == 0) { + /* The paper suggest using the L-0.5 norm, but experiments show that it + * doesn't help. + */ + activeSegment.score += freqs[newDmer]; + } + /* Add the dmer to the segment */ + activeSegment.end += 1; + *newDmerOcc += 1; + + /* If the window is now too large, drop the first position */ + if (activeSegment.end - activeSegment.begin == dmersInK + 1) { + U32 delDmer = ctx->dmerAt[activeSegment.begin]; + U32 *delDmerOcc = COVER_map_at(activeDmers, delDmer); + activeSegment.begin += 1; + *delDmerOcc -= 1; + /* If this is the last occurence of the dmer, subtract its score */ + if (*delDmerOcc == 0) { + COVER_map_remove(activeDmers, delDmer); + activeSegment.score -= freqs[delDmer]; + } + } + + /* If this segment is the best so far save it */ + if (activeSegment.score > bestSegment.score) { + bestSegment = activeSegment; + } + } + { + /* Trim off the zero frequency head and tail from the segment. */ + U32 newBegin = bestSegment.end; + U32 newEnd = bestSegment.begin; + U32 pos; + for (pos = bestSegment.begin; pos != bestSegment.end; ++pos) { + U32 freq = freqs[ctx->dmerAt[pos]]; + if (freq != 0) { + newBegin = MIN(newBegin, pos); + newEnd = pos + 1; + } + } + bestSegment.begin = newBegin; + bestSegment.end = newEnd; + } + { + /* Zero out the frequency of each dmer covered by the chosen segment. */ + U32 pos; + for (pos = bestSegment.begin; pos != bestSegment.end; ++pos) { + freqs[ctx->dmerAt[pos]] = 0; + } + } + return bestSegment; +} + +/** + * Check the validity of the parameters. + * Returns non-zero if the parameters are valid and 0 otherwise. 
+ */ +static int COVER_checkParameters(ZDICT_cover_params_t parameters, + size_t maxDictSize) { + /* k and d are required parameters */ + if (parameters.d == 0 || parameters.k == 0) { + return 0; + } + /* k <= maxDictSize */ + if (parameters.k > maxDictSize) { + return 0; + } + /* d <= k */ + if (parameters.d > parameters.k) { + return 0; + } + return 1; +} + +/** + * Clean up a context initialized with `COVER_ctx_init()`. + */ +static void COVER_ctx_destroy(COVER_ctx_t *ctx) { + if (!ctx) { + return; + } + if (ctx->suffix) { + free(ctx->suffix); + ctx->suffix = NULL; + } + if (ctx->freqs) { + free(ctx->freqs); + ctx->freqs = NULL; + } + if (ctx->dmerAt) { + free(ctx->dmerAt); + ctx->dmerAt = NULL; + } + if (ctx->offsets) { + free(ctx->offsets); + ctx->offsets = NULL; + } +} + +/** + * Prepare a context for dictionary building. + * The context is only dependent on the parameter `d` and can used multiple + * times. + * Returns 1 on success or zero on error. + * The context must be destroyed with `COVER_ctx_destroy()`. 
+ */ +static int COVER_ctx_init(COVER_ctx_t *ctx, const void *samplesBuffer, + const size_t *samplesSizes, unsigned nbSamples, + unsigned d) { + const BYTE *const samples = (const BYTE *)samplesBuffer; + const size_t totalSamplesSize = COVER_sum(samplesSizes, nbSamples); + /* Checks */ + if (totalSamplesSize < MAX(d, sizeof(U64)) || + totalSamplesSize >= (size_t)COVER_MAX_SAMPLES_SIZE) { + DISPLAYLEVEL(1, "Total samples size is too large (%u MB), maximum size is %u MB\n", + (U32)(totalSamplesSize>>20), (COVER_MAX_SAMPLES_SIZE >> 20)); + return 0; + } + /* Zero the context */ + memset(ctx, 0, sizeof(*ctx)); + DISPLAYLEVEL(2, "Training on %u samples of total size %u\n", nbSamples, + (U32)totalSamplesSize); + ctx->samples = samples; + ctx->samplesSizes = samplesSizes; + ctx->nbSamples = nbSamples; + /* Partial suffix array */ + ctx->suffixSize = totalSamplesSize - MAX(d, sizeof(U64)) + 1; + ctx->suffix = (U32 *)malloc(ctx->suffixSize * sizeof(U32)); + /* Maps index to the dmerID */ + ctx->dmerAt = (U32 *)malloc(ctx->suffixSize * sizeof(U32)); + /* The offsets of each file */ + ctx->offsets = (size_t *)malloc((nbSamples + 1) * sizeof(size_t)); + if (!ctx->suffix || !ctx->dmerAt || !ctx->offsets) { + DISPLAYLEVEL(1, "Failed to allocate scratch buffers\n"); + COVER_ctx_destroy(ctx); + return 0; + } + ctx->freqs = NULL; + ctx->d = d; + + /* Fill offsets from the samlesSizes */ + { + U32 i; + ctx->offsets[0] = 0; + for (i = 1; i <= nbSamples; ++i) { + ctx->offsets[i] = ctx->offsets[i - 1] + samplesSizes[i - 1]; + } + } + DISPLAYLEVEL(2, "Constructing partial suffix array\n"); + { + /* suffix is a partial suffix array. + * It only sorts suffixes by their first parameters.d bytes. + * The sort is stable, so each dmer group is sorted by position in input. 
+ */ + U32 i; + for (i = 0; i < ctx->suffixSize; ++i) { + ctx->suffix[i] = i; + } + /* qsort doesn't take an opaque pointer, so pass as a global */ + g_ctx = ctx; + qsort(ctx->suffix, ctx->suffixSize, sizeof(U32), + (ctx->d <= 8 ? &COVER_strict_cmp8 : &COVER_strict_cmp)); + } + DISPLAYLEVEL(2, "Computing frequencies\n"); + /* For each dmer group (group of positions with the same first d bytes): + * 1. For each position we set dmerAt[position] = dmerID. The dmerID is + * (groupBeginPtr - suffix). This allows us to go from position to + * dmerID so we can look up values in freq. + * 2. We calculate how many samples the dmer occurs in and save it in + * freqs[dmerId]. + */ + COVER_groupBy(ctx->suffix, ctx->suffixSize, sizeof(U32), ctx, + (ctx->d <= 8 ? &COVER_cmp8 : &COVER_cmp), &COVER_group); + ctx->freqs = ctx->suffix; + ctx->suffix = NULL; + return 1; +} + +/** + * Given the prepared context build the dictionary. + */ +static size_t COVER_buildDictionary(const COVER_ctx_t *ctx, U32 *freqs, + COVER_map_t *activeDmers, void *dictBuffer, + size_t dictBufferCapacity, + ZDICT_cover_params_t parameters) { + BYTE *const dict = (BYTE *)dictBuffer; + size_t tail = dictBufferCapacity; + /* Divide the data up into epochs of equal size. + * We will select at least one segment from each epoch. + */ + const U32 epochs = (U32)(dictBufferCapacity / parameters.k); + const U32 epochSize = (U32)(ctx->suffixSize / epochs); + size_t epoch; + DISPLAYLEVEL(2, "Breaking content into %u epochs of size %u\n", epochs, + epochSize); + /* Loop through the epochs until there are no more segments or the dictionary + * is full. 
+ */ + for (epoch = 0; tail > 0; epoch = (epoch + 1) % epochs) { + const U32 epochBegin = (U32)(epoch * epochSize); + const U32 epochEnd = epochBegin + epochSize; + size_t segmentSize; + /* Select a segment */ + COVER_segment_t segment = COVER_selectSegment( + ctx, freqs, activeDmers, epochBegin, epochEnd, parameters); + /* If the segment covers no dmers, then we are out of content */ + if (segment.score == 0) { + break; + } + /* Trim the segment if necessary and if it is too small then we are done */ + segmentSize = MIN(segment.end - segment.begin + parameters.d - 1, tail); + if (segmentSize < parameters.d) { + break; + } + /* We fill the dictionary from the back to allow the best segments to be + * referenced with the smallest offsets. + */ + tail -= segmentSize; + memcpy(dict + tail, ctx->samples + segment.begin, segmentSize); + DISPLAYUPDATE( + 2, "\r%u%% ", + (U32)(((dictBufferCapacity - tail) * 100) / dictBufferCapacity)); + } + DISPLAYLEVEL(2, "\r%79s\r", ""); + return tail; +} + +ZDICTLIB_API size_t ZDICT_trainFromBuffer_cover( + void *dictBuffer, size_t dictBufferCapacity, + const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples, + ZDICT_cover_params_t parameters) +{ + BYTE* const dict = (BYTE*)dictBuffer; + COVER_ctx_t ctx; + COVER_map_t activeDmers; + + /* Initialize global data */ + g_displayLevel = parameters.zParams.notificationLevel; + /* Checks */ + if (!COVER_checkParameters(parameters, dictBufferCapacity)) { + DISPLAYLEVEL(1, "Cover parameters incorrect\n"); + return ERROR(GENERIC); + } + if (nbSamples == 0) { + DISPLAYLEVEL(1, "Cover must have at least one input file\n"); + return ERROR(GENERIC); + } + if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) { + DISPLAYLEVEL(1, "dictBufferCapacity must be at least %u\n", + ZDICT_DICTSIZE_MIN); + return ERROR(dstSize_tooSmall); + } + /* Initialize context and activeDmers */ + if (!COVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples, + parameters.d)) { + return ERROR(GENERIC); + } + 
if (!COVER_map_init(&activeDmers, parameters.k - parameters.d + 1)) { + DISPLAYLEVEL(1, "Failed to allocate dmer map: out of memory\n"); + COVER_ctx_destroy(&ctx); + return ERROR(GENERIC); + } + + DISPLAYLEVEL(2, "Building dictionary\n"); + { + const size_t tail = + COVER_buildDictionary(&ctx, ctx.freqs, &activeDmers, dictBuffer, + dictBufferCapacity, parameters); + const size_t dictionarySize = ZDICT_finalizeDictionary( + dict, dictBufferCapacity, dict + tail, dictBufferCapacity - tail, + samplesBuffer, samplesSizes, nbSamples, parameters.zParams); + if (!ZSTD_isError(dictionarySize)) { + DISPLAYLEVEL(2, "Constructed dictionary of size %u\n", + (U32)dictionarySize); + } + COVER_ctx_destroy(&ctx); + COVER_map_destroy(&activeDmers); + return dictionarySize; + } +} + +/** + * COVER_best_t is used for two purposes: + * 1. Synchronizing threads. + * 2. Saving the best parameters and dictionary. + * + * All of the methods except COVER_best_init() are thread safe if zstd is + * compiled with multithreaded support. + */ +typedef struct COVER_best_s { + ZSTD_pthread_mutex_t mutex; + ZSTD_pthread_cond_t cond; + size_t liveJobs; + void *dict; + size_t dictSize; + ZDICT_cover_params_t parameters; + size_t compressedSize; +} COVER_best_t; + +/** + * Initialize the `COVER_best_t`. + */ +static void COVER_best_init(COVER_best_t *best) { + if (best==NULL) return; /* compatible with init on NULL */ + (void)ZSTD_pthread_mutex_init(&best->mutex, NULL); + (void)ZSTD_pthread_cond_init(&best->cond, NULL); + best->liveJobs = 0; + best->dict = NULL; + best->dictSize = 0; + best->compressedSize = (size_t)-1; + memset(&best->parameters, 0, sizeof(best->parameters)); +} + +/** + * Wait until liveJobs == 0. 
+ */ +static void COVER_best_wait(COVER_best_t *best) { + if (!best) { + return; + } + ZSTD_pthread_mutex_lock(&best->mutex); + while (best->liveJobs != 0) { + ZSTD_pthread_cond_wait(&best->cond, &best->mutex); + } + ZSTD_pthread_mutex_unlock(&best->mutex); +} + +/** + * Call COVER_best_wait() and then destroy the COVER_best_t. + */ +static void COVER_best_destroy(COVER_best_t *best) { + if (!best) { + return; + } + COVER_best_wait(best); + if (best->dict) { + free(best->dict); + } + ZSTD_pthread_mutex_destroy(&best->mutex); + ZSTD_pthread_cond_destroy(&best->cond); +} + +/** + * Called when a thread is about to be launched. + * Increments liveJobs. + */ +static void COVER_best_start(COVER_best_t *best) { + if (!best) { + return; + } + ZSTD_pthread_mutex_lock(&best->mutex); + ++best->liveJobs; + ZSTD_pthread_mutex_unlock(&best->mutex); +} + +/** + * Called when a thread finishes executing, both on error or success. + * Decrements liveJobs and signals any waiting threads if liveJobs == 0. + * If this dictionary is the best so far save it and its parameters. 
+ */ +static void COVER_best_finish(COVER_best_t *best, size_t compressedSize, + ZDICT_cover_params_t parameters, void *dict, + size_t dictSize) { + if (!best) { + return; + } + { + size_t liveJobs; + ZSTD_pthread_mutex_lock(&best->mutex); + --best->liveJobs; + liveJobs = best->liveJobs; + /* If the new dictionary is better */ + if (compressedSize < best->compressedSize) { + /* Allocate space if necessary */ + if (!best->dict || best->dictSize < dictSize) { + if (best->dict) { + free(best->dict); + } + best->dict = malloc(dictSize); + if (!best->dict) { + best->compressedSize = ERROR(GENERIC); + best->dictSize = 0; + return; + } + } + /* Save the dictionary, parameters, and size */ + memcpy(best->dict, dict, dictSize); + best->dictSize = dictSize; + best->parameters = parameters; + best->compressedSize = compressedSize; + } + ZSTD_pthread_mutex_unlock(&best->mutex); + if (liveJobs == 0) { + ZSTD_pthread_cond_broadcast(&best->cond); + } + } +} + +/** + * Parameters for COVER_tryParameters(). + */ +typedef struct COVER_tryParameters_data_s { + const COVER_ctx_t *ctx; + COVER_best_t *best; + size_t dictBufferCapacity; + ZDICT_cover_params_t parameters; +} COVER_tryParameters_data_t; + +/** + * Tries a set of parameters and upates the COVER_best_t with the results. + * This function is thread safe if zstd is compiled with multithreaded support. + * It takes its parameters as an *OWNING* opaque pointer to support threading. 
+ */ +static void COVER_tryParameters(void *opaque) { + /* Save parameters as local variables */ + COVER_tryParameters_data_t *const data = (COVER_tryParameters_data_t *)opaque; + const COVER_ctx_t *const ctx = data->ctx; + const ZDICT_cover_params_t parameters = data->parameters; + size_t dictBufferCapacity = data->dictBufferCapacity; + size_t totalCompressedSize = ERROR(GENERIC); + /* Allocate space for hash table, dict, and freqs */ + COVER_map_t activeDmers; + BYTE *const dict = (BYTE * const)malloc(dictBufferCapacity); + U32 *freqs = (U32 *)malloc(ctx->suffixSize * sizeof(U32)); + if (!COVER_map_init(&activeDmers, parameters.k - parameters.d + 1)) { + DISPLAYLEVEL(1, "Failed to allocate dmer map: out of memory\n"); + goto _cleanup; + } + if (!dict || !freqs) { + DISPLAYLEVEL(1, "Failed to allocate buffers: out of memory\n"); + goto _cleanup; + } + /* Copy the frequencies because we need to modify them */ + memcpy(freqs, ctx->freqs, ctx->suffixSize * sizeof(U32)); + /* Build the dictionary */ + { + const size_t tail = COVER_buildDictionary(ctx, freqs, &activeDmers, dict, + dictBufferCapacity, parameters); + dictBufferCapacity = ZDICT_finalizeDictionary( + dict, dictBufferCapacity, dict + tail, dictBufferCapacity - tail, + ctx->samples, ctx->samplesSizes, (unsigned)ctx->nbSamples, + parameters.zParams); + if (ZDICT_isError(dictBufferCapacity)) { + DISPLAYLEVEL(1, "Failed to finalize dictionary\n"); + goto _cleanup; + } + } + /* Check total compressed size */ + { + /* Pointers */ + ZSTD_CCtx *cctx; + ZSTD_CDict *cdict; + void *dst; + /* Local variables */ + size_t dstCapacity; + size_t i; + /* Allocate dst with enough space to compress the maximum sized sample */ + { + size_t maxSampleSize = 0; + for (i = 0; i < ctx->nbSamples; ++i) { + maxSampleSize = MAX(ctx->samplesSizes[i], maxSampleSize); + } + dstCapacity = ZSTD_compressBound(maxSampleSize); + dst = malloc(dstCapacity); + } + /* Create the cctx and cdict */ + cctx = ZSTD_createCCtx(); + cdict = 
ZSTD_createCDict(dict, dictBufferCapacity, + parameters.zParams.compressionLevel); + if (!dst || !cctx || !cdict) { + goto _compressCleanup; + } + /* Compress each sample and sum their sizes (or error) */ + totalCompressedSize = dictBufferCapacity; + for (i = 0; i < ctx->nbSamples; ++i) { + const size_t size = ZSTD_compress_usingCDict( + cctx, dst, dstCapacity, ctx->samples + ctx->offsets[i], + ctx->samplesSizes[i], cdict); + if (ZSTD_isError(size)) { + totalCompressedSize = ERROR(GENERIC); + goto _compressCleanup; + } + totalCompressedSize += size; + } + _compressCleanup: + ZSTD_freeCCtx(cctx); + ZSTD_freeCDict(cdict); + if (dst) { + free(dst); + } + } + +_cleanup: + COVER_best_finish(data->best, totalCompressedSize, parameters, dict, + dictBufferCapacity); + free(data); + COVER_map_destroy(&activeDmers); + if (dict) { + free(dict); + } + if (freqs) { + free(freqs); + } +} + +ZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_cover( + void *dictBuffer, size_t dictBufferCapacity, const void *samplesBuffer, + const size_t *samplesSizes, unsigned nbSamples, + ZDICT_cover_params_t *parameters) { + /* constants */ + const unsigned nbThreads = parameters->nbThreads; + const unsigned kMinD = parameters->d == 0 ? 6 : parameters->d; + const unsigned kMaxD = parameters->d == 0 ? 8 : parameters->d; + const unsigned kMinK = parameters->k == 0 ? 50 : parameters->k; + const unsigned kMaxK = parameters->k == 0 ? 2000 : parameters->k; + const unsigned kSteps = parameters->steps == 0 ? 
40 : parameters->steps; + const unsigned kStepSize = MAX((kMaxK - kMinK) / kSteps, 1); + const unsigned kIterations = + (1 + (kMaxD - kMinD) / 2) * (1 + (kMaxK - kMinK) / kStepSize); + /* Local variables */ + const int displayLevel = parameters->zParams.notificationLevel; + unsigned iteration = 1; + unsigned d; + unsigned k; + COVER_best_t best; + POOL_ctx *pool = NULL; + + /* Checks */ + if (kMinK < kMaxD || kMaxK < kMinK) { + LOCALDISPLAYLEVEL(displayLevel, 1, "Incorrect parameters\n"); + return ERROR(GENERIC); + } + if (nbSamples == 0) { + DISPLAYLEVEL(1, "Cover must have at least one input file\n"); + return ERROR(GENERIC); + } + if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) { + DISPLAYLEVEL(1, "dictBufferCapacity must be at least %u\n", + ZDICT_DICTSIZE_MIN); + return ERROR(dstSize_tooSmall); + } + if (nbThreads > 1) { + pool = POOL_create(nbThreads, 1); + if (!pool) { + return ERROR(memory_allocation); + } + } + /* Initialization */ + COVER_best_init(&best); + /* Turn down global display level to clean up display at level 2 and below */ + g_displayLevel = displayLevel == 0 ? 
0 : displayLevel - 1; + /* Loop through d first because each new value needs a new context */ + LOCALDISPLAYLEVEL(displayLevel, 2, "Trying %u different sets of parameters\n", + kIterations); + for (d = kMinD; d <= kMaxD; d += 2) { + /* Initialize the context for this value of d */ + COVER_ctx_t ctx; + LOCALDISPLAYLEVEL(displayLevel, 3, "d=%u\n", d); + if (!COVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples, d)) { + LOCALDISPLAYLEVEL(displayLevel, 1, "Failed to initialize context\n"); + COVER_best_destroy(&best); + POOL_free(pool); + return ERROR(GENERIC); + } + /* Loop through k reusing the same context */ + for (k = kMinK; k <= kMaxK; k += kStepSize) { + /* Prepare the arguments */ + COVER_tryParameters_data_t *data = (COVER_tryParameters_data_t *)malloc( + sizeof(COVER_tryParameters_data_t)); + LOCALDISPLAYLEVEL(displayLevel, 3, "k=%u\n", k); + if (!data) { + LOCALDISPLAYLEVEL(displayLevel, 1, "Failed to allocate parameters\n"); + COVER_best_destroy(&best); + COVER_ctx_destroy(&ctx); + POOL_free(pool); + return ERROR(GENERIC); + } + data->ctx = &ctx; + data->best = &best; + data->dictBufferCapacity = dictBufferCapacity; + data->parameters = *parameters; + data->parameters.k = k; + data->parameters.d = d; + data->parameters.steps = kSteps; + data->parameters.zParams.notificationLevel = g_displayLevel; + /* Check the parameters */ + if (!COVER_checkParameters(data->parameters, dictBufferCapacity)) { + DISPLAYLEVEL(1, "Cover parameters incorrect\n"); + free(data); + continue; + } + /* Call the function and pass ownership of data to it */ + COVER_best_start(&best); + if (pool) { + POOL_add(pool, &COVER_tryParameters, data); + } else { + COVER_tryParameters(data); + } + /* Print status */ + LOCALDISPLAYUPDATE(displayLevel, 2, "\r%u%% ", + (U32)((iteration * 100) / kIterations)); + ++iteration; + } + COVER_best_wait(&best); + COVER_ctx_destroy(&ctx); + } + LOCALDISPLAYLEVEL(displayLevel, 2, "\r%79s\r", ""); + /* Fill the output buffer and parameters with 
output of the best parameters */ + { + const size_t dictSize = best.dictSize; + if (ZSTD_isError(best.compressedSize)) { + const size_t compressedSize = best.compressedSize; + COVER_best_destroy(&best); + POOL_free(pool); + return compressedSize; + } + *parameters = best.parameters; + memcpy(dictBuffer, best.dict, dictSize); + COVER_best_destroy(&best); + POOL_free(pool); + return dictSize; + } +} diff --git a/vendor/github.com/DataDog/zstd/cpu.h b/vendor/github.com/DataDog/zstd/cpu.h new file mode 100644 index 00000000000..4eb48e39e10 --- /dev/null +++ b/vendor/github.com/DataDog/zstd/cpu.h @@ -0,0 +1,216 @@ +/* + * Copyright (c) 2018-present, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. 
+ */ + +#ifndef ZSTD_COMMON_CPU_H +#define ZSTD_COMMON_CPU_H + +/** + * Implementation taken from folly/CpuId.h + * https://github.com/facebook/folly/blob/master/folly/CpuId.h + */ + +#include <stddef.h> + +#include "mem.h" + +#ifdef _MSC_VER +#include <intrin.h> +#endif + +typedef struct { + U32 f1c; + U32 f1d; + U32 f7b; + U32 f7c; +} ZSTD_cpuid_t; + +MEM_STATIC ZSTD_cpuid_t ZSTD_cpuid(void) { + U32 f1c = 0; + U32 f1d = 0; + U32 f7b = 0; + U32 f7c = 0; +#ifdef _MSC_VER + int reg[4]; + __cpuid((int*)reg, 0); + { + int const n = reg[0]; + if (n >= 1) { + __cpuid((int*)reg, 1); + f1c = (U32)reg[2]; + f1d = (U32)reg[3]; + } + if (n >= 7) { + __cpuidex((int*)reg, 7, 0); + f7b = (U32)reg[1]; + f7c = (U32)reg[2]; + } + } +#elif defined(__i386__) && defined(__PIC__) && !defined(__clang__) && defined(__GNUC__) + /* The following block like the normal cpuid branch below, but gcc + * reserves ebx for use of its pic register so we must specially + * handle the save and restore to avoid clobbering the register + */ + U32 n; + __asm__( + "pushl %%ebx\n\t" + "cpuid\n\t" + "popl %%ebx\n\t" + : "=a"(n) + : "a"(0) + : "ecx", "edx"); + if (n >= 1) { + U32 f1a; + __asm__( + "pushl %%ebx\n\t" + "cpuid\n\t" + "popl %%ebx\n\t" + : "=a"(f1a), "=c"(f1c), "=d"(f1d) + : "a"(1) + :); + } + if (n >= 7) { + __asm__( + "pushl %%ebx\n\t" + "cpuid\n\t" + "movl %%ebx, %%eax\n\r" + "popl %%ebx" + : "=a"(f7b), "=c"(f7c) + : "a"(7), "c"(0) + : "edx"); + } +#elif defined(__x86_64__) || defined(_M_X64) || defined(__i386__) + U32 n; + __asm__("cpuid" : "=a"(n) : "a"(0) : "ebx", "ecx", "edx"); + if (n >= 1) { + U32 f1a; + __asm__("cpuid" : "=a"(f1a), "=c"(f1c), "=d"(f1d) : "a"(1) : "ebx"); + } + if (n >= 7) { + U32 f7a; + __asm__("cpuid" + : "=a"(f7a), "=b"(f7b), "=c"(f7c) + : "a"(7), "c"(0) + : "edx"); + } +#endif + { + ZSTD_cpuid_t cpuid; + cpuid.f1c = f1c; + cpuid.f1d = f1d; + cpuid.f7b = f7b; + cpuid.f7c = f7c; + return cpuid; + } +} + +#define X(name, r, bit) \ + MEM_STATIC int ZSTD_cpuid_##name(ZSTD_cpuid_t 
const cpuid) { \ + return ((cpuid.r) & (1U << bit)) != 0; \ + } + +/* cpuid(1): Processor Info and Feature Bits. */ +#define C(name, bit) X(name, f1c, bit) + C(sse3, 0) + C(pclmuldq, 1) + C(dtes64, 2) + C(monitor, 3) + C(dscpl, 4) + C(vmx, 5) + C(smx, 6) + C(eist, 7) + C(tm2, 8) + C(ssse3, 9) + C(cnxtid, 10) + C(fma, 12) + C(cx16, 13) + C(xtpr, 14) + C(pdcm, 15) + C(pcid, 17) + C(dca, 18) + C(sse41, 19) + C(sse42, 20) + C(x2apic, 21) + C(movbe, 22) + C(popcnt, 23) + C(tscdeadline, 24) + C(aes, 25) + C(xsave, 26) + C(osxsave, 27) + C(avx, 28) + C(f16c, 29) + C(rdrand, 30) +#undef C +#define D(name, bit) X(name, f1d, bit) + D(fpu, 0) + D(vme, 1) + D(de, 2) + D(pse, 3) + D(tsc, 4) + D(msr, 5) + D(pae, 6) + D(mce, 7) + D(cx8, 8) + D(apic, 9) + D(sep, 11) + D(mtrr, 12) + D(pge, 13) + D(mca, 14) + D(cmov, 15) + D(pat, 16) + D(pse36, 17) + D(psn, 18) + D(clfsh, 19) + D(ds, 21) + D(acpi, 22) + D(mmx, 23) + D(fxsr, 24) + D(sse, 25) + D(sse2, 26) + D(ss, 27) + D(htt, 28) + D(tm, 29) + D(pbe, 31) +#undef D + +/* cpuid(7): Extended Features. */ +#define B(name, bit) X(name, f7b, bit) + B(bmi1, 3) + B(hle, 4) + B(avx2, 5) + B(smep, 7) + B(bmi2, 8) + B(erms, 9) + B(invpcid, 10) + B(rtm, 11) + B(mpx, 14) + B(avx512f, 16) + B(avx512dq, 17) + B(rdseed, 18) + B(adx, 19) + B(smap, 20) + B(avx512ifma, 21) + B(pcommit, 22) + B(clflushopt, 23) + B(clwb, 24) + B(avx512pf, 26) + B(avx512er, 27) + B(avx512cd, 28) + B(sha, 29) + B(avx512bw, 30) + B(avx512vl, 31) +#undef B +#define C(name, bit) X(name, f7c, bit) + C(prefetchwt1, 0) + C(avx512vbmi, 1) +#undef C + +#undef X + +#endif /* ZSTD_COMMON_CPU_H */ diff --git a/vendor/github.com/DataDog/zstd/divsufsort.c b/vendor/github.com/DataDog/zstd/divsufsort.c new file mode 100644 index 00000000000..60cceb08832 --- /dev/null +++ b/vendor/github.com/DataDog/zstd/divsufsort.c @@ -0,0 +1,1913 @@ +/* + * divsufsort.c for libdivsufsort-lite + * Copyright (c) 2003-2008 Yuta Mori All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +/*- Compiler specifics -*/ +#ifdef __clang__ +#pragma clang diagnostic ignored "-Wshorten-64-to-32" +#endif + +#if defined(_MSC_VER) +# pragma warning(disable : 4244) +# pragma warning(disable : 4127) /* C4127 : Condition expression is constant */ +#endif + + +/*- Dependencies -*/ +#include <assert.h> +#include <stdio.h> +#include <stdlib.h> + +#include "divsufsort.h" + +/*- Constants -*/ +#if defined(INLINE) +# undef INLINE +#endif +#if !defined(INLINE) +# define INLINE __inline +#endif +#if defined(ALPHABET_SIZE) && (ALPHABET_SIZE < 1) +# undef ALPHABET_SIZE +#endif +#if !defined(ALPHABET_SIZE) +# define ALPHABET_SIZE (256) +#endif +#define BUCKET_A_SIZE (ALPHABET_SIZE) +#define BUCKET_B_SIZE (ALPHABET_SIZE * ALPHABET_SIZE) +#if defined(SS_INSERTIONSORT_THRESHOLD) +# if SS_INSERTIONSORT_THRESHOLD < 1 +# undef SS_INSERTIONSORT_THRESHOLD +# define SS_INSERTIONSORT_THRESHOLD (1) +# endif +#else +# define SS_INSERTIONSORT_THRESHOLD (8) +#endif +#if defined(SS_BLOCKSIZE) +# if SS_BLOCKSIZE < 0 +# undef SS_BLOCKSIZE +# define SS_BLOCKSIZE (0) +# elif 32768 <= SS_BLOCKSIZE +# undef SS_BLOCKSIZE +# define SS_BLOCKSIZE (32767) +# endif +#else +# define SS_BLOCKSIZE (1024) +#endif +/* minstacksize = log(SS_BLOCKSIZE) / log(3) * 2 */ +#if SS_BLOCKSIZE == 0 +# define SS_MISORT_STACKSIZE (96) +#elif SS_BLOCKSIZE <= 4096 +# define SS_MISORT_STACKSIZE (16) +#else +# define SS_MISORT_STACKSIZE (24) +#endif +#define SS_SMERGE_STACKSIZE (32) +#define TR_INSERTIONSORT_THRESHOLD (8) +#define TR_STACKSIZE (64) + + +/*- Macros -*/ +#ifndef SWAP +# define SWAP(_a, _b) do { t = (_a); (_a) = (_b); (_b) = t; } while(0) +#endif /* SWAP */ +#ifndef MIN +# define MIN(_a, _b) (((_a) < (_b)) ? (_a) : (_b)) +#endif /* MIN */ +#ifndef MAX +# define MAX(_a, _b) (((_a) > (_b)) ? 
(_a) : (_b)) +#endif /* MAX */ +#define STACK_PUSH(_a, _b, _c, _d)\ + do {\ + assert(ssize < STACK_SIZE);\ + stack[ssize].a = (_a), stack[ssize].b = (_b),\ + stack[ssize].c = (_c), stack[ssize++].d = (_d);\ + } while(0) +#define STACK_PUSH5(_a, _b, _c, _d, _e)\ + do {\ + assert(ssize < STACK_SIZE);\ + stack[ssize].a = (_a), stack[ssize].b = (_b),\ + stack[ssize].c = (_c), stack[ssize].d = (_d), stack[ssize++].e = (_e);\ + } while(0) +#define STACK_POP(_a, _b, _c, _d)\ + do {\ + assert(0 <= ssize);\ + if(ssize == 0) { return; }\ + (_a) = stack[--ssize].a, (_b) = stack[ssize].b,\ + (_c) = stack[ssize].c, (_d) = stack[ssize].d;\ + } while(0) +#define STACK_POP5(_a, _b, _c, _d, _e)\ + do {\ + assert(0 <= ssize);\ + if(ssize == 0) { return; }\ + (_a) = stack[--ssize].a, (_b) = stack[ssize].b,\ + (_c) = stack[ssize].c, (_d) = stack[ssize].d, (_e) = stack[ssize].e;\ + } while(0) +#define BUCKET_A(_c0) bucket_A[(_c0)] +#if ALPHABET_SIZE == 256 +#define BUCKET_B(_c0, _c1) (bucket_B[((_c1) << 8) | (_c0)]) +#define BUCKET_BSTAR(_c0, _c1) (bucket_B[((_c0) << 8) | (_c1)]) +#else +#define BUCKET_B(_c0, _c1) (bucket_B[(_c1) * ALPHABET_SIZE + (_c0)]) +#define BUCKET_BSTAR(_c0, _c1) (bucket_B[(_c0) * ALPHABET_SIZE + (_c1)]) +#endif + + +/*- Private Functions -*/ + +static const int lg_table[256]= { + -1,0,1,1,2,2,2,2,3,3,3,3,3,3,3,3,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4, + 5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5, + 6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6, + 6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6, + 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, + 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, + 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, + 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7 +}; + +#if (SS_BLOCKSIZE == 0) || (SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE) + +static INLINE +int +ss_ilg(int n) { +#if SS_BLOCKSIZE == 0 + return (n & 
0xffff0000) ? + ((n & 0xff000000) ? + 24 + lg_table[(n >> 24) & 0xff] : + 16 + lg_table[(n >> 16) & 0xff]) : + ((n & 0x0000ff00) ? + 8 + lg_table[(n >> 8) & 0xff] : + 0 + lg_table[(n >> 0) & 0xff]); +#elif SS_BLOCKSIZE < 256 + return lg_table[n]; +#else + return (n & 0xff00) ? + 8 + lg_table[(n >> 8) & 0xff] : + 0 + lg_table[(n >> 0) & 0xff]; +#endif +} + +#endif /* (SS_BLOCKSIZE == 0) || (SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE) */ + +#if SS_BLOCKSIZE != 0 + +static const int sqq_table[256] = { + 0, 16, 22, 27, 32, 35, 39, 42, 45, 48, 50, 53, 55, 57, 59, 61, + 64, 65, 67, 69, 71, 73, 75, 76, 78, 80, 81, 83, 84, 86, 87, 89, + 90, 91, 93, 94, 96, 97, 98, 99, 101, 102, 103, 104, 106, 107, 108, 109, +110, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, +128, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, +143, 144, 144, 145, 146, 147, 148, 149, 150, 150, 151, 152, 153, 154, 155, 155, +156, 157, 158, 159, 160, 160, 161, 162, 163, 163, 164, 165, 166, 167, 167, 168, +169, 170, 170, 171, 172, 173, 173, 174, 175, 176, 176, 177, 178, 178, 179, 180, +181, 181, 182, 183, 183, 184, 185, 185, 186, 187, 187, 188, 189, 189, 190, 191, +192, 192, 193, 193, 194, 195, 195, 196, 197, 197, 198, 199, 199, 200, 201, 201, +202, 203, 203, 204, 204, 205, 206, 206, 207, 208, 208, 209, 209, 210, 211, 211, +212, 212, 213, 214, 214, 215, 215, 216, 217, 217, 218, 218, 219, 219, 220, 221, +221, 222, 222, 223, 224, 224, 225, 225, 226, 226, 227, 227, 228, 229, 229, 230, +230, 231, 231, 232, 232, 233, 234, 234, 235, 235, 236, 236, 237, 237, 238, 238, +239, 240, 240, 241, 241, 242, 242, 243, 243, 244, 244, 245, 245, 246, 246, 247, +247, 248, 248, 249, 249, 250, 250, 251, 251, 252, 252, 253, 253, 254, 254, 255 +}; + +static INLINE +int +ss_isqrt(int x) { + int y, e; + + if(x >= (SS_BLOCKSIZE * SS_BLOCKSIZE)) { return SS_BLOCKSIZE; } + e = (x & 0xffff0000) ? + ((x & 0xff000000) ? 
+ 24 + lg_table[(x >> 24) & 0xff] : + 16 + lg_table[(x >> 16) & 0xff]) : + ((x & 0x0000ff00) ? + 8 + lg_table[(x >> 8) & 0xff] : + 0 + lg_table[(x >> 0) & 0xff]); + + if(e >= 16) { + y = sqq_table[x >> ((e - 6) - (e & 1))] << ((e >> 1) - 7); + if(e >= 24) { y = (y + 1 + x / y) >> 1; } + y = (y + 1 + x / y) >> 1; + } else if(e >= 8) { + y = (sqq_table[x >> ((e - 6) - (e & 1))] >> (7 - (e >> 1))) + 1; + } else { + return sqq_table[x] >> 4; + } + + return (x < (y * y)) ? y - 1 : y; +} + +#endif /* SS_BLOCKSIZE != 0 */ + + +/*---------------------------------------------------------------------------*/ + +/* Compares two suffixes. */ +static INLINE +int +ss_compare(const unsigned char *T, + const int *p1, const int *p2, + int depth) { + const unsigned char *U1, *U2, *U1n, *U2n; + + for(U1 = T + depth + *p1, + U2 = T + depth + *p2, + U1n = T + *(p1 + 1) + 2, + U2n = T + *(p2 + 1) + 2; + (U1 < U1n) && (U2 < U2n) && (*U1 == *U2); + ++U1, ++U2) { + } + + return U1 < U1n ? + (U2 < U2n ? *U1 - *U2 : 1) : + (U2 < U2n ? 
-1 : 0); +} + + +/*---------------------------------------------------------------------------*/ + +#if (SS_BLOCKSIZE != 1) && (SS_INSERTIONSORT_THRESHOLD != 1) + +/* Insertionsort for small size groups */ +static +void +ss_insertionsort(const unsigned char *T, const int *PA, + int *first, int *last, int depth) { + int *i, *j; + int t; + int r; + + for(i = last - 2; first <= i; --i) { + for(t = *i, j = i + 1; 0 < (r = ss_compare(T, PA + t, PA + *j, depth));) { + do { *(j - 1) = *j; } while((++j < last) && (*j < 0)); + if(last <= j) { break; } + } + if(r == 0) { *j = ~*j; } + *(j - 1) = t; + } +} + +#endif /* (SS_BLOCKSIZE != 1) && (SS_INSERTIONSORT_THRESHOLD != 1) */ + + +/*---------------------------------------------------------------------------*/ + +#if (SS_BLOCKSIZE == 0) || (SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE) + +static INLINE +void +ss_fixdown(const unsigned char *Td, const int *PA, + int *SA, int i, int size) { + int j, k; + int v; + int c, d, e; + + for(v = SA[i], c = Td[PA[v]]; (j = 2 * i + 1) < size; SA[i] = SA[k], i = k) { + d = Td[PA[SA[k = j++]]]; + if(d < (e = Td[PA[SA[j]]])) { k = j; d = e; } + if(d <= c) { break; } + } + SA[i] = v; +} + +/* Simple top-down heapsort. */ +static +void +ss_heapsort(const unsigned char *Td, const int *PA, int *SA, int size) { + int i, m; + int t; + + m = size; + if((size % 2) == 0) { + m--; + if(Td[PA[SA[m / 2]]] < Td[PA[SA[m]]]) { SWAP(SA[m], SA[m / 2]); } + } + + for(i = m / 2 - 1; 0 <= i; --i) { ss_fixdown(Td, PA, SA, i, m); } + if((size % 2) == 0) { SWAP(SA[0], SA[m]); ss_fixdown(Td, PA, SA, 0, m); } + for(i = m - 1; 0 < i; --i) { + t = SA[0], SA[0] = SA[i]; + ss_fixdown(Td, PA, SA, 0, i); + SA[i] = t; + } +} + + +/*---------------------------------------------------------------------------*/ + +/* Returns the median of three elements. 
*/ +static INLINE +int * +ss_median3(const unsigned char *Td, const int *PA, + int *v1, int *v2, int *v3) { + int *t; + if(Td[PA[*v1]] > Td[PA[*v2]]) { SWAP(v1, v2); } + if(Td[PA[*v2]] > Td[PA[*v3]]) { + if(Td[PA[*v1]] > Td[PA[*v3]]) { return v1; } + else { return v3; } + } + return v2; +} + +/* Returns the median of five elements. */ +static INLINE +int * +ss_median5(const unsigned char *Td, const int *PA, + int *v1, int *v2, int *v3, int *v4, int *v5) { + int *t; + if(Td[PA[*v2]] > Td[PA[*v3]]) { SWAP(v2, v3); } + if(Td[PA[*v4]] > Td[PA[*v5]]) { SWAP(v4, v5); } + if(Td[PA[*v2]] > Td[PA[*v4]]) { SWAP(v2, v4); SWAP(v3, v5); } + if(Td[PA[*v1]] > Td[PA[*v3]]) { SWAP(v1, v3); } + if(Td[PA[*v1]] > Td[PA[*v4]]) { SWAP(v1, v4); SWAP(v3, v5); } + if(Td[PA[*v3]] > Td[PA[*v4]]) { return v4; } + return v3; +} + +/* Returns the pivot element. */ +static INLINE +int * +ss_pivot(const unsigned char *Td, const int *PA, int *first, int *last) { + int *middle; + int t; + + t = last - first; + middle = first + t / 2; + + if(t <= 512) { + if(t <= 32) { + return ss_median3(Td, PA, first, middle, last - 1); + } else { + t >>= 2; + return ss_median5(Td, PA, first, first + t, middle, last - 1 - t, last - 1); + } + } + t >>= 3; + first = ss_median3(Td, PA, first, first + t, first + (t << 1)); + middle = ss_median3(Td, PA, middle - t, middle, middle + t); + last = ss_median3(Td, PA, last - 1 - (t << 1), last - 1 - t, last - 1); + return ss_median3(Td, PA, first, middle, last); +} + + +/*---------------------------------------------------------------------------*/ + +/* Binary partition for substrings. 
*/ +static INLINE +int * +ss_partition(const int *PA, + int *first, int *last, int depth) { + int *a, *b; + int t; + for(a = first - 1, b = last;;) { + for(; (++a < b) && ((PA[*a] + depth) >= (PA[*a + 1] + 1));) { *a = ~*a; } + for(; (a < --b) && ((PA[*b] + depth) < (PA[*b + 1] + 1));) { } + if(b <= a) { break; } + t = ~*b; + *b = *a; + *a = t; + } + if(first < a) { *first = ~*first; } + return a; +} + +/* Multikey introsort for medium size groups. */ +static +void +ss_mintrosort(const unsigned char *T, const int *PA, + int *first, int *last, + int depth) { +#define STACK_SIZE SS_MISORT_STACKSIZE + struct { int *a, *b, c; int d; } stack[STACK_SIZE]; + const unsigned char *Td; + int *a, *b, *c, *d, *e, *f; + int s, t; + int ssize; + int limit; + int v, x = 0; + + for(ssize = 0, limit = ss_ilg(last - first);;) { + + if((last - first) <= SS_INSERTIONSORT_THRESHOLD) { +#if 1 < SS_INSERTIONSORT_THRESHOLD + if(1 < (last - first)) { ss_insertionsort(T, PA, first, last, depth); } +#endif + STACK_POP(first, last, depth, limit); + continue; + } + + Td = T + depth; + if(limit-- == 0) { ss_heapsort(Td, PA, first, last - first); } + if(limit < 0) { + for(a = first + 1, v = Td[PA[*first]]; a < last; ++a) { + if((x = Td[PA[*a]]) != v) { + if(1 < (a - first)) { break; } + v = x; + first = a; + } + } + if(Td[PA[*first] - 1] < v) { + first = ss_partition(PA, first, a, depth); + } + if((a - first) <= (last - a)) { + if(1 < (a - first)) { + STACK_PUSH(a, last, depth, -1); + last = a, depth += 1, limit = ss_ilg(a - first); + } else { + first = a, limit = -1; + } + } else { + if(1 < (last - a)) { + STACK_PUSH(first, a, depth + 1, ss_ilg(a - first)); + first = a, limit = -1; + } else { + last = a, depth += 1, limit = ss_ilg(a - first); + } + } + continue; + } + + /* choose pivot */ + a = ss_pivot(Td, PA, first, last); + v = Td[PA[*a]]; + SWAP(*first, *a); + + /* partition */ + for(b = first; (++b < last) && ((x = Td[PA[*b]]) == v);) { } + if(((a = b) < last) && (x < v)) { + for(; (++b < 
last) && ((x = Td[PA[*b]]) <= v);) { + if(x == v) { SWAP(*b, *a); ++a; } + } + } + for(c = last; (b < --c) && ((x = Td[PA[*c]]) == v);) { } + if((b < (d = c)) && (x > v)) { + for(; (b < --c) && ((x = Td[PA[*c]]) >= v);) { + if(x == v) { SWAP(*c, *d); --d; } + } + } + for(; b < c;) { + SWAP(*b, *c); + for(; (++b < c) && ((x = Td[PA[*b]]) <= v);) { + if(x == v) { SWAP(*b, *a); ++a; } + } + for(; (b < --c) && ((x = Td[PA[*c]]) >= v);) { + if(x == v) { SWAP(*c, *d); --d; } + } + } + + if(a <= d) { + c = b - 1; + + if((s = a - first) > (t = b - a)) { s = t; } + for(e = first, f = b - s; 0 < s; --s, ++e, ++f) { SWAP(*e, *f); } + if((s = d - c) > (t = last - d - 1)) { s = t; } + for(e = b, f = last - s; 0 < s; --s, ++e, ++f) { SWAP(*e, *f); } + + a = first + (b - a), c = last - (d - c); + b = (v <= Td[PA[*a] - 1]) ? a : ss_partition(PA, a, c, depth); + + if((a - first) <= (last - c)) { + if((last - c) <= (c - b)) { + STACK_PUSH(b, c, depth + 1, ss_ilg(c - b)); + STACK_PUSH(c, last, depth, limit); + last = a; + } else if((a - first) <= (c - b)) { + STACK_PUSH(c, last, depth, limit); + STACK_PUSH(b, c, depth + 1, ss_ilg(c - b)); + last = a; + } else { + STACK_PUSH(c, last, depth, limit); + STACK_PUSH(first, a, depth, limit); + first = b, last = c, depth += 1, limit = ss_ilg(c - b); + } + } else { + if((a - first) <= (c - b)) { + STACK_PUSH(b, c, depth + 1, ss_ilg(c - b)); + STACK_PUSH(first, a, depth, limit); + first = c; + } else if((last - c) <= (c - b)) { + STACK_PUSH(first, a, depth, limit); + STACK_PUSH(b, c, depth + 1, ss_ilg(c - b)); + first = c; + } else { + STACK_PUSH(first, a, depth, limit); + STACK_PUSH(c, last, depth, limit); + first = b, last = c, depth += 1, limit = ss_ilg(c - b); + } + } + } else { + limit += 1; + if(Td[PA[*first] - 1] < v) { + first = ss_partition(PA, first, last, depth); + limit = ss_ilg(last - first); + } + depth += 1; + } + } +#undef STACK_SIZE +} + +#endif /* (SS_BLOCKSIZE == 0) || (SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE) */ + + 
+/*---------------------------------------------------------------------------*/ + +#if SS_BLOCKSIZE != 0 + +static INLINE +void +ss_blockswap(int *a, int *b, int n) { + int t; + for(; 0 < n; --n, ++a, ++b) { + t = *a, *a = *b, *b = t; + } +} + +static INLINE +void +ss_rotate(int *first, int *middle, int *last) { + int *a, *b, t; + int l, r; + l = middle - first, r = last - middle; + for(; (0 < l) && (0 < r);) { + if(l == r) { ss_blockswap(first, middle, l); break; } + if(l < r) { + a = last - 1, b = middle - 1; + t = *a; + do { + *a-- = *b, *b-- = *a; + if(b < first) { + *a = t; + last = a; + if((r -= l + 1) <= l) { break; } + a -= 1, b = middle - 1; + t = *a; + } + } while(1); + } else { + a = first, b = middle; + t = *a; + do { + *a++ = *b, *b++ = *a; + if(last <= b) { + *a = t; + first = a + 1; + if((l -= r + 1) <= r) { break; } + a += 1, b = middle; + t = *a; + } + } while(1); + } + } +} + + +/*---------------------------------------------------------------------------*/ + +static +void +ss_inplacemerge(const unsigned char *T, const int *PA, + int *first, int *middle, int *last, + int depth) { + const int *p; + int *a, *b; + int len, half; + int q, r; + int x; + + for(;;) { + if(*(last - 1) < 0) { x = 1; p = PA + ~*(last - 1); } + else { x = 0; p = PA + *(last - 1); } + for(a = first, len = middle - first, half = len >> 1, r = -1; + 0 < len; + len = half, half >>= 1) { + b = a + half; + q = ss_compare(T, PA + ((0 <= *b) ? *b : ~*b), p, depth); + if(q < 0) { + a = b + 1; + half -= (len & 1) ^ 1; + } else { + r = q; + } + } + if(a < middle) { + if(r == 0) { *a = ~*a; } + ss_rotate(a, middle, last); + last -= middle - a; + middle = a; + if(first == middle) { break; } + } + --last; + if(x != 0) { while(*--last < 0) { } } + if(middle == last) { break; } + } +} + + +/*---------------------------------------------------------------------------*/ + +/* Merge-forward with internal buffer. 
*/ +static +void +ss_mergeforward(const unsigned char *T, const int *PA, + int *first, int *middle, int *last, + int *buf, int depth) { + int *a, *b, *c, *bufend; + int t; + int r; + + bufend = buf + (middle - first) - 1; + ss_blockswap(buf, first, middle - first); + + for(t = *(a = first), b = buf, c = middle;;) { + r = ss_compare(T, PA + *b, PA + *c, depth); + if(r < 0) { + do { + *a++ = *b; + if(bufend <= b) { *bufend = t; return; } + *b++ = *a; + } while(*b < 0); + } else if(r > 0) { + do { + *a++ = *c, *c++ = *a; + if(last <= c) { + while(b < bufend) { *a++ = *b, *b++ = *a; } + *a = *b, *b = t; + return; + } + } while(*c < 0); + } else { + *c = ~*c; + do { + *a++ = *b; + if(bufend <= b) { *bufend = t; return; } + *b++ = *a; + } while(*b < 0); + + do { + *a++ = *c, *c++ = *a; + if(last <= c) { + while(b < bufend) { *a++ = *b, *b++ = *a; } + *a = *b, *b = t; + return; + } + } while(*c < 0); + } + } +} + +/* Merge-backward with internal buffer. */ +static +void +ss_mergebackward(const unsigned char *T, const int *PA, + int *first, int *middle, int *last, + int *buf, int depth) { + const int *p1, *p2; + int *a, *b, *c, *bufend; + int t; + int r; + int x; + + bufend = buf + (last - middle) - 1; + ss_blockswap(buf, middle, last - middle); + + x = 0; + if(*bufend < 0) { p1 = PA + ~*bufend; x |= 1; } + else { p1 = PA + *bufend; } + if(*(middle - 1) < 0) { p2 = PA + ~*(middle - 1); x |= 2; } + else { p2 = PA + *(middle - 1); } + for(t = *(a = last - 1), b = bufend, c = middle - 1;;) { + r = ss_compare(T, p1, p2, depth); + if(0 < r) { + if(x & 1) { do { *a-- = *b, *b-- = *a; } while(*b < 0); x ^= 1; } + *a-- = *b; + if(b <= buf) { *buf = t; break; } + *b-- = *a; + if(*b < 0) { p1 = PA + ~*b; x |= 1; } + else { p1 = PA + *b; } + } else if(r < 0) { + if(x & 2) { do { *a-- = *c, *c-- = *a; } while(*c < 0); x ^= 2; } + *a-- = *c, *c-- = *a; + if(c < first) { + while(buf < b) { *a-- = *b, *b-- = *a; } + *a = *b, *b = t; + break; + } + if(*c < 0) { p2 = PA + ~*c; x |= 2; } + 
else { p2 = PA + *c; } + } else { + if(x & 1) { do { *a-- = *b, *b-- = *a; } while(*b < 0); x ^= 1; } + *a-- = ~*b; + if(b <= buf) { *buf = t; break; } + *b-- = *a; + if(x & 2) { do { *a-- = *c, *c-- = *a; } while(*c < 0); x ^= 2; } + *a-- = *c, *c-- = *a; + if(c < first) { + while(buf < b) { *a-- = *b, *b-- = *a; } + *a = *b, *b = t; + break; + } + if(*b < 0) { p1 = PA + ~*b; x |= 1; } + else { p1 = PA + *b; } + if(*c < 0) { p2 = PA + ~*c; x |= 2; } + else { p2 = PA + *c; } + } + } +} + +/* D&C based merge. */ +static +void +ss_swapmerge(const unsigned char *T, const int *PA, + int *first, int *middle, int *last, + int *buf, int bufsize, int depth) { +#define STACK_SIZE SS_SMERGE_STACKSIZE +#define GETIDX(a) ((0 <= (a)) ? (a) : (~(a))) +#define MERGE_CHECK(a, b, c)\ + do {\ + if(((c) & 1) ||\ + (((c) & 2) && (ss_compare(T, PA + GETIDX(*((a) - 1)), PA + *(a), depth) == 0))) {\ + *(a) = ~*(a);\ + }\ + if(((c) & 4) && ((ss_compare(T, PA + GETIDX(*((b) - 1)), PA + *(b), depth) == 0))) {\ + *(b) = ~*(b);\ + }\ + } while(0) + struct { int *a, *b, *c; int d; } stack[STACK_SIZE]; + int *l, *r, *lm, *rm; + int m, len, half; + int ssize; + int check, next; + + for(check = 0, ssize = 0;;) { + if((last - middle) <= bufsize) { + if((first < middle) && (middle < last)) { + ss_mergebackward(T, PA, first, middle, last, buf, depth); + } + MERGE_CHECK(first, last, check); + STACK_POP(first, middle, last, check); + continue; + } + + if((middle - first) <= bufsize) { + if(first < middle) { + ss_mergeforward(T, PA, first, middle, last, buf, depth); + } + MERGE_CHECK(first, last, check); + STACK_POP(first, middle, last, check); + continue; + } + + for(m = 0, len = MIN(middle - first, last - middle), half = len >> 1; + 0 < len; + len = half, half >>= 1) { + if(ss_compare(T, PA + GETIDX(*(middle + m + half)), + PA + GETIDX(*(middle - m - half - 1)), depth) < 0) { + m += half + 1; + half -= (len & 1) ^ 1; + } + } + + if(0 < m) { + lm = middle - m, rm = middle + m; + ss_blockswap(lm, 
middle, m); + l = r = middle, next = 0; + if(rm < last) { + if(*rm < 0) { + *rm = ~*rm; + if(first < lm) { for(; *--l < 0;) { } next |= 4; } + next |= 1; + } else if(first < lm) { + for(; *r < 0; ++r) { } + next |= 2; + } + } + + if((l - first) <= (last - r)) { + STACK_PUSH(r, rm, last, (next & 3) | (check & 4)); + middle = lm, last = l, check = (check & 3) | (next & 4); + } else { + if((next & 2) && (r == middle)) { next ^= 6; } + STACK_PUSH(first, lm, l, (check & 3) | (next & 4)); + first = r, middle = rm, check = (next & 3) | (check & 4); + } + } else { + if(ss_compare(T, PA + GETIDX(*(middle - 1)), PA + *middle, depth) == 0) { + *middle = ~*middle; + } + MERGE_CHECK(first, last, check); + STACK_POP(first, middle, last, check); + } + } +#undef STACK_SIZE +} + +#endif /* SS_BLOCKSIZE != 0 */ + + +/*---------------------------------------------------------------------------*/ + +/* Substring sort */ +static +void +sssort(const unsigned char *T, const int *PA, + int *first, int *last, + int *buf, int bufsize, + int depth, int n, int lastsuffix) { + int *a; +#if SS_BLOCKSIZE != 0 + int *b, *middle, *curbuf; + int j, k, curbufsize, limit; +#endif + int i; + + if(lastsuffix != 0) { ++first; } + +#if SS_BLOCKSIZE == 0 + ss_mintrosort(T, PA, first, last, depth); +#else + if((bufsize < SS_BLOCKSIZE) && + (bufsize < (last - first)) && + (bufsize < (limit = ss_isqrt(last - first)))) { + if(SS_BLOCKSIZE < limit) { limit = SS_BLOCKSIZE; } + buf = middle = last - limit, bufsize = limit; + } else { + middle = last, limit = 0; + } + for(a = first, i = 0; SS_BLOCKSIZE < (middle - a); a += SS_BLOCKSIZE, ++i) { +#if SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE + ss_mintrosort(T, PA, a, a + SS_BLOCKSIZE, depth); +#elif 1 < SS_BLOCKSIZE + ss_insertionsort(T, PA, a, a + SS_BLOCKSIZE, depth); +#endif + curbufsize = last - (a + SS_BLOCKSIZE); + curbuf = a + SS_BLOCKSIZE; + if(curbufsize <= bufsize) { curbufsize = bufsize, curbuf = buf; } + for(b = a, k = SS_BLOCKSIZE, j = i; j & 1; b -= 
k, k <<= 1, j >>= 1) { + ss_swapmerge(T, PA, b - k, b, b + k, curbuf, curbufsize, depth); + } + } +#if SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE + ss_mintrosort(T, PA, a, middle, depth); +#elif 1 < SS_BLOCKSIZE + ss_insertionsort(T, PA, a, middle, depth); +#endif + for(k = SS_BLOCKSIZE; i != 0; k <<= 1, i >>= 1) { + if(i & 1) { + ss_swapmerge(T, PA, a - k, a, middle, buf, bufsize, depth); + a -= k; + } + } + if(limit != 0) { +#if SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE + ss_mintrosort(T, PA, middle, last, depth); +#elif 1 < SS_BLOCKSIZE + ss_insertionsort(T, PA, middle, last, depth); +#endif + ss_inplacemerge(T, PA, first, middle, last, depth); + } +#endif + + if(lastsuffix != 0) { + /* Insert last type B* suffix. */ + int PAi[2]; PAi[0] = PA[*(first - 1)], PAi[1] = n - 2; + for(a = first, i = *(first - 1); + (a < last) && ((*a < 0) || (0 < ss_compare(T, &(PAi[0]), PA + *a, depth))); + ++a) { + *(a - 1) = *a; + } + *(a - 1) = i; + } +} + + +/*---------------------------------------------------------------------------*/ + +static INLINE +int +tr_ilg(int n) { + return (n & 0xffff0000) ? + ((n & 0xff000000) ? + 24 + lg_table[(n >> 24) & 0xff] : + 16 + lg_table[(n >> 16) & 0xff]) : + ((n & 0x0000ff00) ? + 8 + lg_table[(n >> 8) & 0xff] : + 0 + lg_table[(n >> 0) & 0xff]); +} + + +/*---------------------------------------------------------------------------*/ + +/* Simple insertionsort for small size groups. 
*/ +static +void +tr_insertionsort(const int *ISAd, int *first, int *last) { + int *a, *b; + int t, r; + + for(a = first + 1; a < last; ++a) { + for(t = *a, b = a - 1; 0 > (r = ISAd[t] - ISAd[*b]);) { + do { *(b + 1) = *b; } while((first <= --b) && (*b < 0)); + if(b < first) { break; } + } + if(r == 0) { *b = ~*b; } + *(b + 1) = t; + } +} + + +/*---------------------------------------------------------------------------*/ + +static INLINE +void +tr_fixdown(const int *ISAd, int *SA, int i, int size) { + int j, k; + int v; + int c, d, e; + + for(v = SA[i], c = ISAd[v]; (j = 2 * i + 1) < size; SA[i] = SA[k], i = k) { + d = ISAd[SA[k = j++]]; + if(d < (e = ISAd[SA[j]])) { k = j; d = e; } + if(d <= c) { break; } + } + SA[i] = v; +} + +/* Simple top-down heapsort. */ +static +void +tr_heapsort(const int *ISAd, int *SA, int size) { + int i, m; + int t; + + m = size; + if((size % 2) == 0) { + m--; + if(ISAd[SA[m / 2]] < ISAd[SA[m]]) { SWAP(SA[m], SA[m / 2]); } + } + + for(i = m / 2 - 1; 0 <= i; --i) { tr_fixdown(ISAd, SA, i, m); } + if((size % 2) == 0) { SWAP(SA[0], SA[m]); tr_fixdown(ISAd, SA, 0, m); } + for(i = m - 1; 0 < i; --i) { + t = SA[0], SA[0] = SA[i]; + tr_fixdown(ISAd, SA, 0, i); + SA[i] = t; + } +} + + +/*---------------------------------------------------------------------------*/ + +/* Returns the median of three elements. */ +static INLINE +int * +tr_median3(const int *ISAd, int *v1, int *v2, int *v3) { + int *t; + if(ISAd[*v1] > ISAd[*v2]) { SWAP(v1, v2); } + if(ISAd[*v2] > ISAd[*v3]) { + if(ISAd[*v1] > ISAd[*v3]) { return v1; } + else { return v3; } + } + return v2; +} + +/* Returns the median of five elements. 
*/ +static INLINE +int * +tr_median5(const int *ISAd, + int *v1, int *v2, int *v3, int *v4, int *v5) { + int *t; + if(ISAd[*v2] > ISAd[*v3]) { SWAP(v2, v3); } + if(ISAd[*v4] > ISAd[*v5]) { SWAP(v4, v5); } + if(ISAd[*v2] > ISAd[*v4]) { SWAP(v2, v4); SWAP(v3, v5); } + if(ISAd[*v1] > ISAd[*v3]) { SWAP(v1, v3); } + if(ISAd[*v1] > ISAd[*v4]) { SWAP(v1, v4); SWAP(v3, v5); } + if(ISAd[*v3] > ISAd[*v4]) { return v4; } + return v3; +} + +/* Returns the pivot element. */ +static INLINE +int * +tr_pivot(const int *ISAd, int *first, int *last) { + int *middle; + int t; + + t = last - first; + middle = first + t / 2; + + if(t <= 512) { + if(t <= 32) { + return tr_median3(ISAd, first, middle, last - 1); + } else { + t >>= 2; + return tr_median5(ISAd, first, first + t, middle, last - 1 - t, last - 1); + } + } + t >>= 3; + first = tr_median3(ISAd, first, first + t, first + (t << 1)); + middle = tr_median3(ISAd, middle - t, middle, middle + t); + last = tr_median3(ISAd, last - 1 - (t << 1), last - 1 - t, last - 1); + return tr_median3(ISAd, first, middle, last); +} + + +/*---------------------------------------------------------------------------*/ + +typedef struct _trbudget_t trbudget_t; +struct _trbudget_t { + int chance; + int remain; + int incval; + int count; +}; + +static INLINE +void +trbudget_init(trbudget_t *budget, int chance, int incval) { + budget->chance = chance; + budget->remain = budget->incval = incval; +} + +static INLINE +int +trbudget_check(trbudget_t *budget, int size) { + if(size <= budget->remain) { budget->remain -= size; return 1; } + if(budget->chance == 0) { budget->count += size; return 0; } + budget->remain += budget->incval - size; + budget->chance -= 1; + return 1; +} + + +/*---------------------------------------------------------------------------*/ + +static INLINE +void +tr_partition(const int *ISAd, + int *first, int *middle, int *last, + int **pa, int **pb, int v) { + int *a, *b, *c, *d, *e, *f; + int t, s; + int x = 0; + + for(b = middle - 1; 
(++b < last) && ((x = ISAd[*b]) == v);) { } + if(((a = b) < last) && (x < v)) { + for(; (++b < last) && ((x = ISAd[*b]) <= v);) { + if(x == v) { SWAP(*b, *a); ++a; } + } + } + for(c = last; (b < --c) && ((x = ISAd[*c]) == v);) { } + if((b < (d = c)) && (x > v)) { + for(; (b < --c) && ((x = ISAd[*c]) >= v);) { + if(x == v) { SWAP(*c, *d); --d; } + } + } + for(; b < c;) { + SWAP(*b, *c); + for(; (++b < c) && ((x = ISAd[*b]) <= v);) { + if(x == v) { SWAP(*b, *a); ++a; } + } + for(; (b < --c) && ((x = ISAd[*c]) >= v);) { + if(x == v) { SWAP(*c, *d); --d; } + } + } + + if(a <= d) { + c = b - 1; + if((s = a - first) > (t = b - a)) { s = t; } + for(e = first, f = b - s; 0 < s; --s, ++e, ++f) { SWAP(*e, *f); } + if((s = d - c) > (t = last - d - 1)) { s = t; } + for(e = b, f = last - s; 0 < s; --s, ++e, ++f) { SWAP(*e, *f); } + first += (b - a), last -= (d - c); + } + *pa = first, *pb = last; +} + +static +void +tr_copy(int *ISA, const int *SA, + int *first, int *a, int *b, int *last, + int depth) { + /* sort suffixes of middle partition + by using sorted order of suffixes of left and right partition. 
*/ + int *c, *d, *e; + int s, v; + + v = b - SA - 1; + for(c = first, d = a - 1; c <= d; ++c) { + if((0 <= (s = *c - depth)) && (ISA[s] == v)) { + *++d = s; + ISA[s] = d - SA; + } + } + for(c = last - 1, e = d + 1, d = b; e < d; --c) { + if((0 <= (s = *c - depth)) && (ISA[s] == v)) { + *--d = s; + ISA[s] = d - SA; + } + } +} + +static +void +tr_partialcopy(int *ISA, const int *SA, + int *first, int *a, int *b, int *last, + int depth) { + int *c, *d, *e; + int s, v; + int rank, lastrank, newrank = -1; + + v = b - SA - 1; + lastrank = -1; + for(c = first, d = a - 1; c <= d; ++c) { + if((0 <= (s = *c - depth)) && (ISA[s] == v)) { + *++d = s; + rank = ISA[s + depth]; + if(lastrank != rank) { lastrank = rank; newrank = d - SA; } + ISA[s] = newrank; + } + } + + lastrank = -1; + for(e = d; first <= e; --e) { + rank = ISA[*e]; + if(lastrank != rank) { lastrank = rank; newrank = e - SA; } + if(newrank != rank) { ISA[*e] = newrank; } + } + + lastrank = -1; + for(c = last - 1, e = d + 1, d = b; e < d; --c) { + if((0 <= (s = *c - depth)) && (ISA[s] == v)) { + *--d = s; + rank = ISA[s + depth]; + if(lastrank != rank) { lastrank = rank; newrank = d - SA; } + ISA[s] = newrank; + } + } +} + +static +void +tr_introsort(int *ISA, const int *ISAd, + int *SA, int *first, int *last, + trbudget_t *budget) { +#define STACK_SIZE TR_STACKSIZE + struct { const int *a; int *b, *c; int d, e; }stack[STACK_SIZE]; + int *a, *b, *c; + int t; + int v, x = 0; + int incr = ISAd - ISA; + int limit, next; + int ssize, trlink = -1; + + for(ssize = 0, limit = tr_ilg(last - first);;) { + + if(limit < 0) { + if(limit == -1) { + /* tandem repeat partition */ + tr_partition(ISAd - incr, first, first, last, &a, &b, last - SA - 1); + + /* update ranks */ + if(a < last) { + for(c = first, v = a - SA - 1; c < a; ++c) { ISA[*c] = v; } + } + if(b < last) { + for(c = a, v = b - SA - 1; c < b; ++c) { ISA[*c] = v; } + } + + /* push */ + if(1 < (b - a)) { + STACK_PUSH5(NULL, a, b, 0, 0); + STACK_PUSH5(ISAd - incr, 
first, last, -2, trlink); + trlink = ssize - 2; + } + if((a - first) <= (last - b)) { + if(1 < (a - first)) { + STACK_PUSH5(ISAd, b, last, tr_ilg(last - b), trlink); + last = a, limit = tr_ilg(a - first); + } else if(1 < (last - b)) { + first = b, limit = tr_ilg(last - b); + } else { + STACK_POP5(ISAd, first, last, limit, trlink); + } + } else { + if(1 < (last - b)) { + STACK_PUSH5(ISAd, first, a, tr_ilg(a - first), trlink); + first = b, limit = tr_ilg(last - b); + } else if(1 < (a - first)) { + last = a, limit = tr_ilg(a - first); + } else { + STACK_POP5(ISAd, first, last, limit, trlink); + } + } + } else if(limit == -2) { + /* tandem repeat copy */ + a = stack[--ssize].b, b = stack[ssize].c; + if(stack[ssize].d == 0) { + tr_copy(ISA, SA, first, a, b, last, ISAd - ISA); + } else { + if(0 <= trlink) { stack[trlink].d = -1; } + tr_partialcopy(ISA, SA, first, a, b, last, ISAd - ISA); + } + STACK_POP5(ISAd, first, last, limit, trlink); + } else { + /* sorted partition */ + if(0 <= *first) { + a = first; + do { ISA[*a] = a - SA; } while((++a < last) && (0 <= *a)); + first = a; + } + if(first < last) { + a = first; do { *a = ~*a; } while(*++a < 0); + next = (ISA[*a] != ISAd[*a]) ? 
tr_ilg(a - first + 1) : -1; + if(++a < last) { for(b = first, v = a - SA - 1; b < a; ++b) { ISA[*b] = v; } } + + /* push */ + if(trbudget_check(budget, a - first)) { + if((a - first) <= (last - a)) { + STACK_PUSH5(ISAd, a, last, -3, trlink); + ISAd += incr, last = a, limit = next; + } else { + if(1 < (last - a)) { + STACK_PUSH5(ISAd + incr, first, a, next, trlink); + first = a, limit = -3; + } else { + ISAd += incr, last = a, limit = next; + } + } + } else { + if(0 <= trlink) { stack[trlink].d = -1; } + if(1 < (last - a)) { + first = a, limit = -3; + } else { + STACK_POP5(ISAd, first, last, limit, trlink); + } + } + } else { + STACK_POP5(ISAd, first, last, limit, trlink); + } + } + continue; + } + + if((last - first) <= TR_INSERTIONSORT_THRESHOLD) { + tr_insertionsort(ISAd, first, last); + limit = -3; + continue; + } + + if(limit-- == 0) { + tr_heapsort(ISAd, first, last - first); + for(a = last - 1; first < a; a = b) { + for(x = ISAd[*a], b = a - 1; (first <= b) && (ISAd[*b] == x); --b) { *b = ~*b; } + } + limit = -3; + continue; + } + + /* choose pivot */ + a = tr_pivot(ISAd, first, last); + SWAP(*first, *a); + v = ISAd[*first]; + + /* partition */ + tr_partition(ISAd, first, first + 1, last, &a, &b, v); + if((last - first) != (b - a)) { + next = (ISA[*a] != v) ? 
tr_ilg(b - a) : -1; + + /* update ranks */ + for(c = first, v = a - SA - 1; c < a; ++c) { ISA[*c] = v; } + if(b < last) { for(c = a, v = b - SA - 1; c < b; ++c) { ISA[*c] = v; } } + + /* push */ + if((1 < (b - a)) && (trbudget_check(budget, b - a))) { + if((a - first) <= (last - b)) { + if((last - b) <= (b - a)) { + if(1 < (a - first)) { + STACK_PUSH5(ISAd + incr, a, b, next, trlink); + STACK_PUSH5(ISAd, b, last, limit, trlink); + last = a; + } else if(1 < (last - b)) { + STACK_PUSH5(ISAd + incr, a, b, next, trlink); + first = b; + } else { + ISAd += incr, first = a, last = b, limit = next; + } + } else if((a - first) <= (b - a)) { + if(1 < (a - first)) { + STACK_PUSH5(ISAd, b, last, limit, trlink); + STACK_PUSH5(ISAd + incr, a, b, next, trlink); + last = a; + } else { + STACK_PUSH5(ISAd, b, last, limit, trlink); + ISAd += incr, first = a, last = b, limit = next; + } + } else { + STACK_PUSH5(ISAd, b, last, limit, trlink); + STACK_PUSH5(ISAd, first, a, limit, trlink); + ISAd += incr, first = a, last = b, limit = next; + } + } else { + if((a - first) <= (b - a)) { + if(1 < (last - b)) { + STACK_PUSH5(ISAd + incr, a, b, next, trlink); + STACK_PUSH5(ISAd, first, a, limit, trlink); + first = b; + } else if(1 < (a - first)) { + STACK_PUSH5(ISAd + incr, a, b, next, trlink); + last = a; + } else { + ISAd += incr, first = a, last = b, limit = next; + } + } else if((last - b) <= (b - a)) { + if(1 < (last - b)) { + STACK_PUSH5(ISAd, first, a, limit, trlink); + STACK_PUSH5(ISAd + incr, a, b, next, trlink); + first = b; + } else { + STACK_PUSH5(ISAd, first, a, limit, trlink); + ISAd += incr, first = a, last = b, limit = next; + } + } else { + STACK_PUSH5(ISAd, first, a, limit, trlink); + STACK_PUSH5(ISAd, b, last, limit, trlink); + ISAd += incr, first = a, last = b, limit = next; + } + } + } else { + if((1 < (b - a)) && (0 <= trlink)) { stack[trlink].d = -1; } + if((a - first) <= (last - b)) { + if(1 < (a - first)) { + STACK_PUSH5(ISAd, b, last, limit, trlink); + last = a; + } 
else if(1 < (last - b)) { + first = b; + } else { + STACK_POP5(ISAd, first, last, limit, trlink); + } + } else { + if(1 < (last - b)) { + STACK_PUSH5(ISAd, first, a, limit, trlink); + first = b; + } else if(1 < (a - first)) { + last = a; + } else { + STACK_POP5(ISAd, first, last, limit, trlink); + } + } + } + } else { + if(trbudget_check(budget, last - first)) { + limit = tr_ilg(last - first), ISAd += incr; + } else { + if(0 <= trlink) { stack[trlink].d = -1; } + STACK_POP5(ISAd, first, last, limit, trlink); + } + } + } +#undef STACK_SIZE +} + + + +/*---------------------------------------------------------------------------*/ + +/* Tandem repeat sort */ +static +void +trsort(int *ISA, int *SA, int n, int depth) { + int *ISAd; + int *first, *last; + trbudget_t budget; + int t, skip, unsorted; + + trbudget_init(&budget, tr_ilg(n) * 2 / 3, n); +/* trbudget_init(&budget, tr_ilg(n) * 3 / 4, n); */ + for(ISAd = ISA + depth; -n < *SA; ISAd += ISAd - ISA) { + first = SA; + skip = 0; + unsorted = 0; + do { + if((t = *first) < 0) { first -= t; skip += t; } + else { + if(skip != 0) { *(first + skip) = skip; skip = 0; } + last = SA + ISA[t] + 1; + if(1 < (last - first)) { + budget.count = 0; + tr_introsort(ISA, ISAd, SA, first, last, &budget); + if(budget.count != 0) { unsorted += budget.count; } + else { skip = first - last; } + } else if((last - first) == 1) { + skip = -1; + } + first = last; + } + } while(first < (SA + n)); + if(skip != 0) { *(first + skip) = skip; } + if(unsorted == 0) { break; } + } +} + + +/*---------------------------------------------------------------------------*/ + +/* Sorts suffixes of type B*. */ +static +int +sort_typeBstar(const unsigned char *T, int *SA, + int *bucket_A, int *bucket_B, + int n, int openMP) { + int *PAb, *ISAb, *buf; +#ifdef LIBBSC_OPENMP + int *curbuf; + int l; +#endif + int i, j, k, t, m, bufsize; + int c0, c1; +#ifdef LIBBSC_OPENMP + int d0, d1; +#endif + (void)openMP; + + /* Initialize bucket arrays. 
*/ + for(i = 0; i < BUCKET_A_SIZE; ++i) { bucket_A[i] = 0; } + for(i = 0; i < BUCKET_B_SIZE; ++i) { bucket_B[i] = 0; } + + /* Count the number of occurrences of the first one or two characters of each + type A, B and B* suffix. Moreover, store the beginning position of all + type B* suffixes into the array SA. */ + for(i = n - 1, m = n, c0 = T[n - 1]; 0 <= i;) { + /* type A suffix. */ + do { ++BUCKET_A(c1 = c0); } while((0 <= --i) && ((c0 = T[i]) >= c1)); + if(0 <= i) { + /* type B* suffix. */ + ++BUCKET_BSTAR(c0, c1); + SA[--m] = i; + /* type B suffix. */ + for(--i, c1 = c0; (0 <= i) && ((c0 = T[i]) <= c1); --i, c1 = c0) { + ++BUCKET_B(c0, c1); + } + } + } + m = n - m; +/* +note: + A type B* suffix is lexicographically smaller than a type B suffix that + begins with the same first two characters. +*/ + + /* Calculate the index of start/end point of each bucket. */ + for(c0 = 0, i = 0, j = 0; c0 < ALPHABET_SIZE; ++c0) { + t = i + BUCKET_A(c0); + BUCKET_A(c0) = i + j; /* start point */ + i = t + BUCKET_B(c0, c0); + for(c1 = c0 + 1; c1 < ALPHABET_SIZE; ++c1) { + j += BUCKET_BSTAR(c0, c1); + BUCKET_BSTAR(c0, c1) = j; /* end point */ + i += BUCKET_B(c0, c1); + } + } + + if(0 < m) { + /* Sort the type B* suffixes by their first two characters. */ + PAb = SA + n - m; ISAb = SA + m; + for(i = m - 2; 0 <= i; --i) { + t = PAb[i], c0 = T[t], c1 = T[t + 1]; + SA[--BUCKET_BSTAR(c0, c1)] = i; + } + t = PAb[m - 1], c0 = T[t], c1 = T[t + 1]; + SA[--BUCKET_BSTAR(c0, c1)] = m - 1; + + /* Sort the type B* substrings using sssort. 
*/ +#ifdef LIBBSC_OPENMP + if (openMP) + { + buf = SA + m; + c0 = ALPHABET_SIZE - 2, c1 = ALPHABET_SIZE - 1, j = m; +#pragma omp parallel default(shared) private(bufsize, curbuf, k, l, d0, d1) + { + bufsize = (n - (2 * m)) / omp_get_num_threads(); + curbuf = buf + omp_get_thread_num() * bufsize; + k = 0; + for(;;) { + #pragma omp critical(sssort_lock) + { + if(0 < (l = j)) { + d0 = c0, d1 = c1; + do { + k = BUCKET_BSTAR(d0, d1); + if(--d1 <= d0) { + d1 = ALPHABET_SIZE - 1; + if(--d0 < 0) { break; } + } + } while(((l - k) <= 1) && (0 < (l = k))); + c0 = d0, c1 = d1, j = k; + } + } + if(l == 0) { break; } + sssort(T, PAb, SA + k, SA + l, + curbuf, bufsize, 2, n, *(SA + k) == (m - 1)); + } + } + } + else + { + buf = SA + m, bufsize = n - (2 * m); + for(c0 = ALPHABET_SIZE - 2, j = m; 0 < j; --c0) { + for(c1 = ALPHABET_SIZE - 1; c0 < c1; j = i, --c1) { + i = BUCKET_BSTAR(c0, c1); + if(1 < (j - i)) { + sssort(T, PAb, SA + i, SA + j, + buf, bufsize, 2, n, *(SA + i) == (m - 1)); + } + } + } + } +#else + buf = SA + m, bufsize = n - (2 * m); + for(c0 = ALPHABET_SIZE - 2, j = m; 0 < j; --c0) { + for(c1 = ALPHABET_SIZE - 1; c0 < c1; j = i, --c1) { + i = BUCKET_BSTAR(c0, c1); + if(1 < (j - i)) { + sssort(T, PAb, SA + i, SA + j, + buf, bufsize, 2, n, *(SA + i) == (m - 1)); + } + } + } +#endif + + /* Compute ranks of type B* substrings. */ + for(i = m - 1; 0 <= i; --i) { + if(0 <= SA[i]) { + j = i; + do { ISAb[SA[i]] = i; } while((0 <= --i) && (0 <= SA[i])); + SA[i + 1] = i - j; + if(i <= 0) { break; } + } + j = i; + do { ISAb[SA[i] = ~SA[i]] = j; } while(SA[--i] < 0); + ISAb[SA[i]] = j; + } + + /* Construct the inverse suffix array of type B* suffixes using trsort. */ + trsort(ISAb, SA, m, 1); + + /* Set the sorted order of tyoe B* suffixes. 
*/ + for(i = n - 1, j = m, c0 = T[n - 1]; 0 <= i;) { + for(--i, c1 = c0; (0 <= i) && ((c0 = T[i]) >= c1); --i, c1 = c0) { } + if(0 <= i) { + t = i; + for(--i, c1 = c0; (0 <= i) && ((c0 = T[i]) <= c1); --i, c1 = c0) { } + SA[ISAb[--j]] = ((t == 0) || (1 < (t - i))) ? t : ~t; + } + } + + /* Calculate the index of start/end point of each bucket. */ + BUCKET_B(ALPHABET_SIZE - 1, ALPHABET_SIZE - 1) = n; /* end point */ + for(c0 = ALPHABET_SIZE - 2, k = m - 1; 0 <= c0; --c0) { + i = BUCKET_A(c0 + 1) - 1; + for(c1 = ALPHABET_SIZE - 1; c0 < c1; --c1) { + t = i - BUCKET_B(c0, c1); + BUCKET_B(c0, c1) = i; /* end point */ + + /* Move all type B* suffixes to the correct position. */ + for(i = t, j = BUCKET_BSTAR(c0, c1); + j <= k; + --i, --k) { SA[i] = SA[k]; } + } + BUCKET_BSTAR(c0, c0 + 1) = i - BUCKET_B(c0, c0) + 1; /* start point */ + BUCKET_B(c0, c0) = i; /* end point */ + } + } + + return m; +} + +/* Constructs the suffix array by using the sorted order of type B* suffixes. */ +static +void +construct_SA(const unsigned char *T, int *SA, + int *bucket_A, int *bucket_B, + int n, int m) { + int *i, *j, *k; + int s; + int c0, c1, c2; + + if(0 < m) { + /* Construct the sorted order of type B suffixes by using + the sorted order of type B* suffixes. */ + for(c1 = ALPHABET_SIZE - 2; 0 <= c1; --c1) { + /* Scan the suffix array from right to left. */ + for(i = SA + BUCKET_BSTAR(c1, c1 + 1), + j = SA + BUCKET_A(c1 + 1) - 1, k = NULL, c2 = -1; + i <= j; + --j) { + if(0 < (s = *j)) { + assert(T[s] == c1); + assert(((s + 1) < n) && (T[s] <= T[s + 1])); + assert(T[s - 1] <= T[s]); + *j = ~s; + c0 = T[--s]; + if((0 < s) && (T[s - 1] > c0)) { s = ~s; } + if(c0 != c2) { + if(0 <= c2) { BUCKET_B(c2, c1) = k - SA; } + k = SA + BUCKET_B(c2 = c0, c1); + } + assert(k < j); + *k-- = s; + } else { + assert(((s == 0) && (T[s] == c1)) || (s < 0)); + *j = ~s; + } + } + } + } + + /* Construct the suffix array by using + the sorted order of type B suffixes. 
*/ + k = SA + BUCKET_A(c2 = T[n - 1]); + *k++ = (T[n - 2] < c2) ? ~(n - 1) : (n - 1); + /* Scan the suffix array from left to right. */ + for(i = SA, j = SA + n; i < j; ++i) { + if(0 < (s = *i)) { + assert(T[s - 1] >= T[s]); + c0 = T[--s]; + if((s == 0) || (T[s - 1] < c0)) { s = ~s; } + if(c0 != c2) { + BUCKET_A(c2) = k - SA; + k = SA + BUCKET_A(c2 = c0); + } + assert(i < k); + *k++ = s; + } else { + assert(s < 0); + *i = ~s; + } + } +} + +/* Constructs the burrows-wheeler transformed string directly + by using the sorted order of type B* suffixes. */ +static +int +construct_BWT(const unsigned char *T, int *SA, + int *bucket_A, int *bucket_B, + int n, int m) { + int *i, *j, *k, *orig; + int s; + int c0, c1, c2; + + if(0 < m) { + /* Construct the sorted order of type B suffixes by using + the sorted order of type B* suffixes. */ + for(c1 = ALPHABET_SIZE - 2; 0 <= c1; --c1) { + /* Scan the suffix array from right to left. */ + for(i = SA + BUCKET_BSTAR(c1, c1 + 1), + j = SA + BUCKET_A(c1 + 1) - 1, k = NULL, c2 = -1; + i <= j; + --j) { + if(0 < (s = *j)) { + assert(T[s] == c1); + assert(((s + 1) < n) && (T[s] <= T[s + 1])); + assert(T[s - 1] <= T[s]); + c0 = T[--s]; + *j = ~((int)c0); + if((0 < s) && (T[s - 1] > c0)) { s = ~s; } + if(c0 != c2) { + if(0 <= c2) { BUCKET_B(c2, c1) = k - SA; } + k = SA + BUCKET_B(c2 = c0, c1); + } + assert(k < j); + *k-- = s; + } else if(s != 0) { + *j = ~s; +#ifndef NDEBUG + } else { + assert(T[s] == c1); +#endif + } + } + } + } + + /* Construct the BWTed string by using + the sorted order of type B suffixes. */ + k = SA + BUCKET_A(c2 = T[n - 1]); + *k++ = (T[n - 2] < c2) ? ~((int)T[n - 2]) : (n - 1); + /* Scan the suffix array from left to right. 
*/ + for(i = SA, j = SA + n, orig = SA; i < j; ++i) { + if(0 < (s = *i)) { + assert(T[s - 1] >= T[s]); + c0 = T[--s]; + *i = c0; + if((0 < s) && (T[s - 1] < c0)) { s = ~((int)T[s - 1]); } + if(c0 != c2) { + BUCKET_A(c2) = k - SA; + k = SA + BUCKET_A(c2 = c0); + } + assert(i < k); + *k++ = s; + } else if(s != 0) { + *i = ~s; + } else { + orig = i; + } + } + + return orig - SA; +} + +/* Constructs the burrows-wheeler transformed string directly + by using the sorted order of type B* suffixes. */ +static +int +construct_BWT_indexes(const unsigned char *T, int *SA, + int *bucket_A, int *bucket_B, + int n, int m, + unsigned char * num_indexes, int * indexes) { + int *i, *j, *k, *orig; + int s; + int c0, c1, c2; + + int mod = n / 8; + { + mod |= mod >> 1; mod |= mod >> 2; + mod |= mod >> 4; mod |= mod >> 8; + mod |= mod >> 16; mod >>= 1; + + *num_indexes = (unsigned char)((n - 1) / (mod + 1)); + } + + if(0 < m) { + /* Construct the sorted order of type B suffixes by using + the sorted order of type B* suffixes. */ + for(c1 = ALPHABET_SIZE - 2; 0 <= c1; --c1) { + /* Scan the suffix array from right to left. */ + for(i = SA + BUCKET_BSTAR(c1, c1 + 1), + j = SA + BUCKET_A(c1 + 1) - 1, k = NULL, c2 = -1; + i <= j; + --j) { + if(0 < (s = *j)) { + assert(T[s] == c1); + assert(((s + 1) < n) && (T[s] <= T[s + 1])); + assert(T[s - 1] <= T[s]); + + if ((s & mod) == 0) indexes[s / (mod + 1) - 1] = j - SA; + + c0 = T[--s]; + *j = ~((int)c0); + if((0 < s) && (T[s - 1] > c0)) { s = ~s; } + if(c0 != c2) { + if(0 <= c2) { BUCKET_B(c2, c1) = k - SA; } + k = SA + BUCKET_B(c2 = c0, c1); + } + assert(k < j); + *k-- = s; + } else if(s != 0) { + *j = ~s; +#ifndef NDEBUG + } else { + assert(T[s] == c1); +#endif + } + } + } + } + + /* Construct the BWTed string by using + the sorted order of type B suffixes. 
*/ + k = SA + BUCKET_A(c2 = T[n - 1]); + if (T[n - 2] < c2) { + if (((n - 1) & mod) == 0) indexes[(n - 1) / (mod + 1) - 1] = k - SA; + *k++ = ~((int)T[n - 2]); + } + else { + *k++ = n - 1; + } + + /* Scan the suffix array from left to right. */ + for(i = SA, j = SA + n, orig = SA; i < j; ++i) { + if(0 < (s = *i)) { + assert(T[s - 1] >= T[s]); + + if ((s & mod) == 0) indexes[s / (mod + 1) - 1] = i - SA; + + c0 = T[--s]; + *i = c0; + if(c0 != c2) { + BUCKET_A(c2) = k - SA; + k = SA + BUCKET_A(c2 = c0); + } + assert(i < k); + if((0 < s) && (T[s - 1] < c0)) { + if ((s & mod) == 0) indexes[s / (mod + 1) - 1] = k - SA; + *k++ = ~((int)T[s - 1]); + } else + *k++ = s; + } else if(s != 0) { + *i = ~s; + } else { + orig = i; + } + } + + return orig - SA; +} + + +/*---------------------------------------------------------------------------*/ + +/*- Function -*/ + +int +divsufsort(const unsigned char *T, int *SA, int n, int openMP) { + int *bucket_A, *bucket_B; + int m; + int err = 0; + + /* Check arguments. */ + if((T == NULL) || (SA == NULL) || (n < 0)) { return -1; } + else if(n == 0) { return 0; } + else if(n == 1) { SA[0] = 0; return 0; } + else if(n == 2) { m = (T[0] < T[1]); SA[m ^ 1] = 0, SA[m] = 1; return 0; } + + bucket_A = (int *)malloc(BUCKET_A_SIZE * sizeof(int)); + bucket_B = (int *)malloc(BUCKET_B_SIZE * sizeof(int)); + + /* Suffixsort. */ + if((bucket_A != NULL) && (bucket_B != NULL)) { + m = sort_typeBstar(T, SA, bucket_A, bucket_B, n, openMP); + construct_SA(T, SA, bucket_A, bucket_B, n, m); + } else { + err = -2; + } + + free(bucket_B); + free(bucket_A); + + return err; +} + +int +divbwt(const unsigned char *T, unsigned char *U, int *A, int n, unsigned char * num_indexes, int * indexes, int openMP) { + int *B; + int *bucket_A, *bucket_B; + int m, pidx, i; + + /* Check arguments. 
*/ + if((T == NULL) || (U == NULL) || (n < 0)) { return -1; } + else if(n <= 1) { if(n == 1) { U[0] = T[0]; } return n; } + + if((B = A) == NULL) { B = (int *)malloc((size_t)(n + 1) * sizeof(int)); } + bucket_A = (int *)malloc(BUCKET_A_SIZE * sizeof(int)); + bucket_B = (int *)malloc(BUCKET_B_SIZE * sizeof(int)); + + /* Burrows-Wheeler Transform. */ + if((B != NULL) && (bucket_A != NULL) && (bucket_B != NULL)) { + m = sort_typeBstar(T, B, bucket_A, bucket_B, n, openMP); + + if (num_indexes == NULL || indexes == NULL) { + pidx = construct_BWT(T, B, bucket_A, bucket_B, n, m); + } else { + pidx = construct_BWT_indexes(T, B, bucket_A, bucket_B, n, m, num_indexes, indexes); + } + + /* Copy to output string. */ + U[0] = T[n - 1]; + for(i = 0; i < pidx; ++i) { U[i + 1] = (unsigned char)B[i]; } + for(i += 1; i < n; ++i) { U[i] = (unsigned char)B[i]; } + pidx += 1; + } else { + pidx = -2; + } + + free(bucket_B); + free(bucket_A); + if(A == NULL) { free(B); } + + return pidx; +} diff --git a/vendor/github.com/DataDog/zstd/divsufsort.h b/vendor/github.com/DataDog/zstd/divsufsort.h new file mode 100644 index 00000000000..5440994af15 --- /dev/null +++ b/vendor/github.com/DataDog/zstd/divsufsort.h @@ -0,0 +1,67 @@ +/* + * divsufsort.h for libdivsufsort-lite + * Copyright (c) 2003-2008 Yuta Mori All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _DIVSUFSORT_H +#define _DIVSUFSORT_H 1 + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + + +/*- Prototypes -*/ + +/** + * Constructs the suffix array of a given string. + * @param T [0..n-1] The input string. + * @param SA [0..n-1] The output array of suffixes. + * @param n The length of the given string. + * @param openMP enables OpenMP optimization. + * @return 0 if no error occurred, -1 or -2 otherwise. + */ +int +divsufsort(const unsigned char *T, int *SA, int n, int openMP); + +/** + * Constructs the burrows-wheeler transformed string of a given string. + * @param T [0..n-1] The input string. + * @param U [0..n-1] The output string. (can be T) + * @param A [0..n-1] The temporary array. (can be NULL) + * @param n The length of the given string. + * @param num_indexes The length of secondary indexes array. (can be NULL) + * @param indexes The secondary indexes array. (can be NULL) + * @param openMP enables OpenMP optimization. + * @return The primary index if no error occurred, -1 or -2 otherwise. 
+ */ +int +divbwt(const unsigned char *T, unsigned char *U, int *A, int n, unsigned char * num_indexes, int * indexes, int openMP); + + +#ifdef __cplusplus +} /* extern "C" */ +#endif /* __cplusplus */ + +#endif /* _DIVSUFSORT_H */ diff --git a/vendor/github.com/DataDog/zstd/entropy_common.c b/vendor/github.com/DataDog/zstd/entropy_common.c new file mode 100644 index 00000000000..b37a082fee2 --- /dev/null +++ b/vendor/github.com/DataDog/zstd/entropy_common.c @@ -0,0 +1,221 @@ +/* + Common functions of New Generation Entropy library + Copyright (C) 2016, Yann Collet. + + BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ + You can contact the author at : + - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy + - Public forum : https://groups.google.com/forum/#!forum/lz4c +*************************************************************************** */ + +/* ************************************* +* Dependencies +***************************************/ +#include "mem.h" +#include "error_private.h" /* ERR_*, ERROR */ +#define FSE_STATIC_LINKING_ONLY /* FSE_MIN_TABLELOG */ +#include "fse.h" +#define HUF_STATIC_LINKING_ONLY /* HUF_TABLELOG_ABSOLUTEMAX */ +#include "huf.h" + + +/*=== Version ===*/ +unsigned FSE_versionNumber(void) { return FSE_VERSION_NUMBER; } + + +/*=== Error Management ===*/ +unsigned FSE_isError(size_t code) { return ERR_isError(code); } +const char* FSE_getErrorName(size_t code) { return ERR_getErrorName(code); } + +unsigned HUF_isError(size_t code) { return ERR_isError(code); } +const char* HUF_getErrorName(size_t code) { return ERR_getErrorName(code); } + + +/*-************************************************************** +* FSE NCount encoding-decoding +****************************************************************/ +size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr, + const void* headerBuffer, size_t hbSize) +{ + const BYTE* const istart = (const BYTE*) headerBuffer; + const BYTE* const iend = istart + hbSize; + const BYTE* ip = istart; + int nbBits; + int remaining; + int threshold; + U32 bitStream; + int bitCount; + unsigned charnum = 0; + int previous0 = 0; + + if (hbSize < 4) return ERROR(srcSize_wrong); + bitStream = MEM_readLE32(ip); + nbBits = (bitStream & 0xF) + FSE_MIN_TABLELOG; /* extract tableLog */ + if (nbBits > FSE_TABLELOG_ABSOLUTE_MAX) return ERROR(tableLog_tooLarge); + bitStream >>= 4; + bitCount = 4; + *tableLogPtr = nbBits; + remaining = (1<1) & (charnum<=*maxSVPtr)) { + if (previous0) { + unsigned n0 = charnum; + while ((bitStream & 0xFFFF) == 0xFFFF) { + n0 += 24; + 
if (ip < iend-5) { + ip += 2; + bitStream = MEM_readLE32(ip) >> bitCount; + } else { + bitStream >>= 16; + bitCount += 16; + } } + while ((bitStream & 3) == 3) { + n0 += 3; + bitStream >>= 2; + bitCount += 2; + } + n0 += bitStream & 3; + bitCount += 2; + if (n0 > *maxSVPtr) return ERROR(maxSymbolValue_tooSmall); + while (charnum < n0) normalizedCounter[charnum++] = 0; + if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) { + ip += bitCount>>3; + bitCount &= 7; + bitStream = MEM_readLE32(ip) >> bitCount; + } else { + bitStream >>= 2; + } } + { int const max = (2*threshold-1) - remaining; + int count; + + if ((bitStream & (threshold-1)) < (U32)max) { + count = bitStream & (threshold-1); + bitCount += nbBits-1; + } else { + count = bitStream & (2*threshold-1); + if (count >= threshold) count -= max; + bitCount += nbBits; + } + + count--; /* extra accuracy */ + remaining -= count < 0 ? -count : count; /* -1 means +1 */ + normalizedCounter[charnum++] = (short)count; + previous0 = !count; + while (remaining < threshold) { + nbBits--; + threshold >>= 1; + } + + if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) { + ip += bitCount>>3; + bitCount &= 7; + } else { + bitCount -= (int)(8 * (iend - 4 - ip)); + ip = iend - 4; + } + bitStream = MEM_readLE32(ip) >> (bitCount & 31); + } } /* while ((remaining>1) & (charnum<=*maxSVPtr)) */ + if (remaining != 1) return ERROR(corruption_detected); + if (bitCount > 32) return ERROR(corruption_detected); + *maxSVPtr = charnum-1; + + ip += (bitCount+7)>>3; + return ip-istart; +} + + +/*! HUF_readStats() : + Read compact Huffman tree, saved by HUF_writeCTable(). + `huffWeight` is destination buffer. + `rankStats` is assumed to be a table of at least HUF_TABLELOG_MAX U32. + @return : size read from `src` , or an error Code . + Note : Needed by HUF_readCTable() and HUF_readDTableX?() . 
+*/ +size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats, + U32* nbSymbolsPtr, U32* tableLogPtr, + const void* src, size_t srcSize) +{ + U32 weightTotal; + const BYTE* ip = (const BYTE*) src; + size_t iSize; + size_t oSize; + + if (!srcSize) return ERROR(srcSize_wrong); + iSize = ip[0]; + /* memset(huffWeight, 0, hwSize); *//* is not necessary, even though some analyzer complain ... */ + + if (iSize >= 128) { /* special header */ + oSize = iSize - 127; + iSize = ((oSize+1)/2); + if (iSize+1 > srcSize) return ERROR(srcSize_wrong); + if (oSize >= hwSize) return ERROR(corruption_detected); + ip += 1; + { U32 n; + for (n=0; n> 4; + huffWeight[n+1] = ip[n/2] & 15; + } } } + else { /* header compressed with FSE (normal case) */ + FSE_DTable fseWorkspace[FSE_DTABLE_SIZE_U32(6)]; /* 6 is max possible tableLog for HUF header (maybe even 5, to be tested) */ + if (iSize+1 > srcSize) return ERROR(srcSize_wrong); + oSize = FSE_decompress_wksp(huffWeight, hwSize-1, ip+1, iSize, fseWorkspace, 6); /* max (hwSize-1) values decoded, as last one is implied */ + if (FSE_isError(oSize)) return oSize; + } + + /* collect weight stats */ + memset(rankStats, 0, (HUF_TABLELOG_MAX + 1) * sizeof(U32)); + weightTotal = 0; + { U32 n; for (n=0; n= HUF_TABLELOG_MAX) return ERROR(corruption_detected); + rankStats[huffWeight[n]]++; + weightTotal += (1 << huffWeight[n]) >> 1; + } } + if (weightTotal == 0) return ERROR(corruption_detected); + + /* get last non-null symbol weight (implied, total must be 2^n) */ + { U32 const tableLog = BIT_highbit32(weightTotal) + 1; + if (tableLog > HUF_TABLELOG_MAX) return ERROR(corruption_detected); + *tableLogPtr = tableLog; + /* determine last weight */ + { U32 const total = 1 << tableLog; + U32 const rest = total - weightTotal; + U32 const verif = 1 << BIT_highbit32(rest); + U32 const lastWeight = BIT_highbit32(rest) + 1; + if (verif != rest) return ERROR(corruption_detected); /* last value must be a clean power of 2 */ + huffWeight[oSize] = 
(BYTE)lastWeight; + rankStats[lastWeight]++; + } } + + /* check tree construction validity */ + if ((rankStats[1] < 2) || (rankStats[1] & 1)) return ERROR(corruption_detected); /* by construction : at least 2 elts of rank 1, must be even */ + + /* results */ + *nbSymbolsPtr = (U32)(oSize+1); + return iSize+1; +} diff --git a/vendor/github.com/DataDog/zstd/error_private.c b/vendor/github.com/DataDog/zstd/error_private.c new file mode 100644 index 00000000000..d004ee636c6 --- /dev/null +++ b/vendor/github.com/DataDog/zstd/error_private.c @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + +/* The purpose of this file is to have a single list of error strings embedded in binary */ + +#include "error_private.h" + +const char* ERR_getErrorString(ERR_enum code) +{ + static const char* const notErrorCode = "Unspecified error code"; + switch( code ) + { + case PREFIX(no_error): return "No error detected"; + case PREFIX(GENERIC): return "Error (generic)"; + case PREFIX(prefix_unknown): return "Unknown frame descriptor"; + case PREFIX(version_unsupported): return "Version not supported"; + case PREFIX(frameParameter_unsupported): return "Unsupported frame parameter"; + case PREFIX(frameParameter_windowTooLarge): return "Frame requires too much memory for decoding"; + case PREFIX(corruption_detected): return "Corrupted block detected"; + case PREFIX(checksum_wrong): return "Restored data doesn't match checksum"; + case PREFIX(parameter_unsupported): return "Unsupported parameter"; + case PREFIX(parameter_outOfBound): return "Parameter is out of bound"; + case PREFIX(init_missing): return 
"Context should be init first"; + case PREFIX(memory_allocation): return "Allocation error : not enough memory"; + case PREFIX(workSpace_tooSmall): return "workSpace buffer is not large enough"; + case PREFIX(stage_wrong): return "Operation not authorized at current processing stage"; + case PREFIX(tableLog_tooLarge): return "tableLog requires too much memory : unsupported"; + case PREFIX(maxSymbolValue_tooLarge): return "Unsupported max Symbol Value : too large"; + case PREFIX(maxSymbolValue_tooSmall): return "Specified maxSymbolValue is too small"; + case PREFIX(dictionary_corrupted): return "Dictionary is corrupted"; + case PREFIX(dictionary_wrong): return "Dictionary mismatch"; + case PREFIX(dictionaryCreation_failed): return "Cannot create Dictionary from provided samples"; + case PREFIX(dstSize_tooSmall): return "Destination buffer is too small"; + case PREFIX(srcSize_wrong): return "Src size is incorrect"; + /* following error codes are not stable and may be removed or changed in a future version */ + case PREFIX(frameIndex_tooLarge): return "Frame index is too large"; + case PREFIX(seekableIO): return "An I/O error occurred when reading/seeking"; + case PREFIX(maxCode): + default: return notErrorCode; + } +} diff --git a/vendor/github.com/DataDog/zstd/error_private.h b/vendor/github.com/DataDog/zstd/error_private.h new file mode 100644 index 00000000000..0d2fa7e34b0 --- /dev/null +++ b/vendor/github.com/DataDog/zstd/error_private.h @@ -0,0 +1,76 @@ +/* + * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. 
+ */ + +/* Note : this module is expected to remain private, do not expose it */ + +#ifndef ERROR_H_MODULE +#define ERROR_H_MODULE + +#if defined (__cplusplus) +extern "C" { +#endif + + +/* **************************************** +* Dependencies +******************************************/ +#include /* size_t */ +#include "zstd_errors.h" /* enum list */ + + +/* **************************************** +* Compiler-specific +******************************************/ +#if defined(__GNUC__) +# define ERR_STATIC static __attribute__((unused)) +#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) +# define ERR_STATIC static inline +#elif defined(_MSC_VER) +# define ERR_STATIC static __inline +#else +# define ERR_STATIC static /* this version may generate warnings for unused static functions; disable the relevant warning */ +#endif + + +/*-**************************************** +* Customization (error_public.h) +******************************************/ +typedef ZSTD_ErrorCode ERR_enum; +#define PREFIX(name) ZSTD_error_##name + + +/*-**************************************** +* Error codes handling +******************************************/ +#undef ERROR /* reported already defined on VS 2015 (Rich Geldreich) */ +#define ERROR(name) ZSTD_ERROR(name) +#define ZSTD_ERROR(name) ((size_t)-PREFIX(name)) + +ERR_STATIC unsigned ERR_isError(size_t code) { return (code > ERROR(maxCode)); } + +ERR_STATIC ERR_enum ERR_getErrorCode(size_t code) { if (!ERR_isError(code)) return (ERR_enum)0; return (ERR_enum) (0-code); } + + +/*-**************************************** +* Error Strings +******************************************/ + +const char* ERR_getErrorString(ERR_enum code); /* error_private.c */ + +ERR_STATIC const char* ERR_getErrorName(size_t code) +{ + return ERR_getErrorString(ERR_getErrorCode(code)); +} + +#if defined (__cplusplus) +} +#endif + +#endif /* ERROR_H_MODULE */ diff --git 
a/vendor/github.com/DataDog/zstd/errors.go b/vendor/github.com/DataDog/zstd/errors.go new file mode 100644 index 00000000000..38db0d51cce --- /dev/null +++ b/vendor/github.com/DataDog/zstd/errors.go @@ -0,0 +1,35 @@ +package zstd + +/* +#define ZSTD_STATIC_LINKING_ONLY +#include "zstd.h" +*/ +import "C" + +// ErrorCode is an error returned by the zstd library. +type ErrorCode int + +// Error returns the error string given by zstd +func (e ErrorCode) Error() string { + return C.GoString(C.ZSTD_getErrorName(C.size_t(e))) +} + +func cIsError(code int) bool { + return int(C.ZSTD_isError(C.size_t(code))) != 0 +} + +// getError returns an error for the return code, or nil if it's not an error +func getError(code int) error { + if code < 0 && cIsError(code) { + return ErrorCode(code) + } + return nil +} + +// IsDstSizeTooSmallError returns whether the error correspond to zstd standard sDstSizeTooSmall error +func IsDstSizeTooSmallError(e error) bool { + if e != nil && e.Error() == "Destination buffer is too small" { + return true + } + return false +} diff --git a/vendor/github.com/DataDog/zstd/fse.h b/vendor/github.com/DataDog/zstd/fse.h new file mode 100644 index 00000000000..6a1d272be5c --- /dev/null +++ b/vendor/github.com/DataDog/zstd/fse.h @@ -0,0 +1,704 @@ +/* ****************************************************************** + FSE : Finite State Entropy codec + Public Prototypes declaration + Copyright (C) 2013-2016, Yann Collet. + + BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + You can contact the author at : + - Source repository : https://github.com/Cyan4973/FiniteStateEntropy +****************************************************************** */ + +#if defined (__cplusplus) +extern "C" { +#endif + +#ifndef FSE_H +#define FSE_H + + +/*-***************************************** +* Dependencies +******************************************/ +#include /* size_t, ptrdiff_t */ + + +/*-***************************************** +* FSE_PUBLIC_API : control library symbols visibility +******************************************/ +#if defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) && defined(__GNUC__) && (__GNUC__ >= 4) +# define FSE_PUBLIC_API __attribute__ ((visibility ("default"))) +#elif defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) /* Visual expected */ +# define FSE_PUBLIC_API __declspec(dllexport) +#elif defined(FSE_DLL_IMPORT) && (FSE_DLL_IMPORT==1) +# define FSE_PUBLIC_API __declspec(dllimport) /* It isn't required but allows to generate better code, 
saving a function pointer load from the IAT and an indirect jump.*/ +#else +# define FSE_PUBLIC_API +#endif + +/*------ Version ------*/ +#define FSE_VERSION_MAJOR 0 +#define FSE_VERSION_MINOR 9 +#define FSE_VERSION_RELEASE 0 + +#define FSE_LIB_VERSION FSE_VERSION_MAJOR.FSE_VERSION_MINOR.FSE_VERSION_RELEASE +#define FSE_QUOTE(str) #str +#define FSE_EXPAND_AND_QUOTE(str) FSE_QUOTE(str) +#define FSE_VERSION_STRING FSE_EXPAND_AND_QUOTE(FSE_LIB_VERSION) + +#define FSE_VERSION_NUMBER (FSE_VERSION_MAJOR *100*100 + FSE_VERSION_MINOR *100 + FSE_VERSION_RELEASE) +FSE_PUBLIC_API unsigned FSE_versionNumber(void); /**< library version number; to be used when checking dll version */ + +/*-**************************************** +* FSE simple functions +******************************************/ +/*! FSE_compress() : + Compress content of buffer 'src', of size 'srcSize', into destination buffer 'dst'. + 'dst' buffer must be already allocated. Compression runs faster is dstCapacity >= FSE_compressBound(srcSize). + @return : size of compressed data (<= dstCapacity). + Special values : if return == 0, srcData is not compressible => Nothing is stored within dst !!! + if return == 1, srcData is a single byte symbol * srcSize times. Use RLE compression instead. + if FSE_isError(return), compression failed (more details using FSE_getErrorName()) +*/ +FSE_PUBLIC_API size_t FSE_compress(void* dst, size_t dstCapacity, + const void* src, size_t srcSize); + +/*! FSE_decompress(): + Decompress FSE data from buffer 'cSrc', of size 'cSrcSize', + into already allocated destination buffer 'dst', of size 'dstCapacity'. + @return : size of regenerated data (<= maxDstSize), + or an error code, which can be tested using FSE_isError() . + + ** Important ** : FSE_decompress() does not decompress non-compressible nor RLE data !!! + Why ? : making this distinction requires a header. + Header management is intentionally delegated to the user layer, which can better manage special cases. 
+*/ +FSE_PUBLIC_API size_t FSE_decompress(void* dst, size_t dstCapacity, + const void* cSrc, size_t cSrcSize); + + +/*-***************************************** +* Tool functions +******************************************/ +FSE_PUBLIC_API size_t FSE_compressBound(size_t size); /* maximum compressed size */ + +/* Error Management */ +FSE_PUBLIC_API unsigned FSE_isError(size_t code); /* tells if a return value is an error code */ +FSE_PUBLIC_API const char* FSE_getErrorName(size_t code); /* provides error code string (useful for debugging) */ + + +/*-***************************************** +* FSE advanced functions +******************************************/ +/*! FSE_compress2() : + Same as FSE_compress(), but allows the selection of 'maxSymbolValue' and 'tableLog' + Both parameters can be defined as '0' to mean : use default value + @return : size of compressed data + Special values : if return == 0, srcData is not compressible => Nothing is stored within cSrc !!! + if return == 1, srcData is a single byte symbol * srcSize times. Use RLE compression. + if FSE_isError(return), it's an error code. +*/ +FSE_PUBLIC_API size_t FSE_compress2 (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog); + + +/*-***************************************** +* FSE detailed API +******************************************/ +/*! +FSE_compress() does the following: +1. count symbol occurrence from source[] into table count[] +2. normalize counters so that sum(count[]) == Power_of_2 (2^tableLog) +3. save normalized counters to memory buffer using writeNCount() +4. build encoding table 'CTable' from normalized counters +5. encode the data stream using encoding table 'CTable' + +FSE_decompress() does the following: +1. read normalized counters with readNCount() +2. build decoding table 'DTable' from normalized counters +3. 
decode the data stream using decoding table 'DTable' + +The following API allows targeting specific sub-functions for advanced tasks. +For example, it's possible to compress several blocks using the same 'CTable', +or to save and provide normalized distribution using external method. +*/ + +/* *** COMPRESSION *** */ + +/*! FSE_count(): + Provides the precise count of each byte within a table 'count'. + 'count' is a table of unsigned int, of minimum size (*maxSymbolValuePtr+1). + *maxSymbolValuePtr will be updated if detected smaller than initial value. + @return : the count of the most frequent symbol (which is not identified). + if return == srcSize, there is only one symbol. + Can also return an error code, which can be tested with FSE_isError(). */ +FSE_PUBLIC_API size_t FSE_count(unsigned* count, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize); + +/*! FSE_optimalTableLog(): + dynamically downsize 'tableLog' when conditions are met. + It saves CPU time, by using smaller tables, while preserving or even improving compression ratio. + @return : recommended tableLog (necessarily <= 'maxTableLog') */ +FSE_PUBLIC_API unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue); + +/*! FSE_normalizeCount(): + normalize counts so that sum(count[]) == Power_of_2 (2^tableLog) + 'normalizedCounter' is a table of short, of minimum size (maxSymbolValue+1). + @return : tableLog, + or an errorCode, which can be tested using FSE_isError() */ +FSE_PUBLIC_API size_t FSE_normalizeCount(short* normalizedCounter, unsigned tableLog, const unsigned* count, size_t srcSize, unsigned maxSymbolValue); + +/*! FSE_NCountWriteBound(): + Provides the maximum possible size of an FSE normalized table, given 'maxSymbolValue' and 'tableLog'. + Typically useful for allocation purpose. */ +FSE_PUBLIC_API size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog); + +/*! FSE_writeNCount(): + Compactly save 'normalizedCounter' into 'buffer'. 
+ @return : size of the compressed table, + or an errorCode, which can be tested using FSE_isError(). */ +FSE_PUBLIC_API size_t FSE_writeNCount (void* buffer, size_t bufferSize, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog); + + +/*! Constructor and Destructor of FSE_CTable. + Note that FSE_CTable size depends on 'tableLog' and 'maxSymbolValue' */ +typedef unsigned FSE_CTable; /* don't allocate that. It's only meant to be more restrictive than void* */ +FSE_PUBLIC_API FSE_CTable* FSE_createCTable (unsigned maxSymbolValue, unsigned tableLog); +FSE_PUBLIC_API void FSE_freeCTable (FSE_CTable* ct); + +/*! FSE_buildCTable(): + Builds `ct`, which must be already allocated, using FSE_createCTable(). + @return : 0, or an errorCode, which can be tested using FSE_isError() */ +FSE_PUBLIC_API size_t FSE_buildCTable(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog); + +/*! FSE_compress_usingCTable(): + Compress `src` using `ct` into `dst` which must be already allocated. + @return : size of compressed data (<= `dstCapacity`), + or 0 if compressed data could not fit into `dst`, + or an errorCode, which can be tested using FSE_isError() */ +FSE_PUBLIC_API size_t FSE_compress_usingCTable (void* dst, size_t dstCapacity, const void* src, size_t srcSize, const FSE_CTable* ct); + +/*! +Tutorial : +---------- +The first step is to count all symbols. FSE_count() does this job very fast. +Result will be saved into 'count', a table of unsigned int, which must be already allocated, and have 'maxSymbolValuePtr[0]+1' cells. +'src' is a table of bytes of size 'srcSize'. All values within 'src' MUST be <= maxSymbolValuePtr[0] +maxSymbolValuePtr[0] will be updated, with its real value (necessarily <= original value) +FSE_count() will return the number of occurrence of the most frequent symbol. +This can be used to know if there is a single symbol within 'src', and to quickly evaluate its compressibility. 
+If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError()). + +The next step is to normalize the frequencies. +FSE_normalizeCount() will ensure that sum of frequencies is == 2 ^'tableLog'. +It also guarantees a minimum of 1 to any Symbol with frequency >= 1. +You can use 'tableLog'==0 to mean "use default tableLog value". +If you are unsure of which tableLog value to use, you can ask FSE_optimalTableLog(), +which will provide the optimal valid tableLog given sourceSize, maxSymbolValue, and a user-defined maximum (0 means "default"). + +The result of FSE_normalizeCount() will be saved into a table, +called 'normalizedCounter', which is a table of signed short. +'normalizedCounter' must be already allocated, and have at least 'maxSymbolValue+1' cells. +The return value is tableLog if everything proceeded as expected. +It is 0 if there is a single symbol within distribution. +If there is an error (ex: invalid tableLog value), the function will return an ErrorCode (which can be tested using FSE_isError()). + +'normalizedCounter' can be saved in a compact manner to a memory area using FSE_writeNCount(). +'buffer' must be already allocated. +For guaranteed success, buffer size must be at least FSE_headerBound(). +The result of the function is the number of bytes written into 'buffer'. +If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError(); ex : buffer size too small). + +'normalizedCounter' can then be used to create the compression table 'CTable'. +The space required by 'CTable' must be already allocated, using FSE_createCTable(). +You can then use FSE_buildCTable() to fill 'CTable'. +If there is an error, both functions will return an ErrorCode (which can be tested using FSE_isError()). + +'CTable' can then be used to compress 'src', with FSE_compress_usingCTable(). 
+Similar to FSE_count(), the convention is that 'src' is assumed to be a table of char of size 'srcSize' +The function returns the size of compressed data (without header), necessarily <= `dstCapacity`. +If it returns '0', compressed data could not fit into 'dst'. +If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError()). +*/ + + +/* *** DECOMPRESSION *** */ + +/*! FSE_readNCount(): + Read compactly saved 'normalizedCounter' from 'rBuffer'. + @return : size read from 'rBuffer', + or an errorCode, which can be tested using FSE_isError(). + maxSymbolValuePtr[0] and tableLogPtr[0] will also be updated with their respective values */ +FSE_PUBLIC_API size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSymbolValuePtr, unsigned* tableLogPtr, const void* rBuffer, size_t rBuffSize); + +/*! Constructor and Destructor of FSE_DTable. + Note that its size depends on 'tableLog' */ +typedef unsigned FSE_DTable; /* don't allocate that. It's just a way to be more restrictive than void* */ +FSE_PUBLIC_API FSE_DTable* FSE_createDTable(unsigned tableLog); +FSE_PUBLIC_API void FSE_freeDTable(FSE_DTable* dt); + +/*! FSE_buildDTable(): + Builds 'dt', which must be already allocated, using FSE_createDTable(). + return : 0, or an errorCode, which can be tested using FSE_isError() */ +FSE_PUBLIC_API size_t FSE_buildDTable (FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog); + +/*! FSE_decompress_usingDTable(): + Decompress compressed source `cSrc` of size `cSrcSize` using `dt` + into `dst` which must be already allocated. + @return : size of regenerated data (necessarily <= `dstCapacity`), + or an errorCode, which can be tested using FSE_isError() */ +FSE_PUBLIC_API size_t FSE_decompress_usingDTable(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, const FSE_DTable* dt); + +/*! +Tutorial : +---------- +(Note : these functions only decompress FSE-compressed blocks. 
+ If block is uncompressed, use memcpy() instead + If block is a single repeated byte, use memset() instead ) + +The first step is to obtain the normalized frequencies of symbols. +This can be performed by FSE_readNCount() if it was saved using FSE_writeNCount(). +'normalizedCounter' must be already allocated, and have at least 'maxSymbolValuePtr[0]+1' cells of signed short. +In practice, that means it's necessary to know 'maxSymbolValue' beforehand, +or size the table to handle worst case situations (typically 256). +FSE_readNCount() will provide 'tableLog' and 'maxSymbolValue'. +The result of FSE_readNCount() is the number of bytes read from 'rBuffer'. +Note that 'rBufferSize' must be at least 4 bytes, even if useful information is less than that. +If there is an error, the function will return an error code, which can be tested using FSE_isError(). + +The next step is to build the decompression tables 'FSE_DTable' from 'normalizedCounter'. +This is performed by the function FSE_buildDTable(). +The space required by 'FSE_DTable' must be already allocated using FSE_createDTable(). +If there is an error, the function will return an error code, which can be tested using FSE_isError(). + +`FSE_DTable` can then be used to decompress `cSrc`, with FSE_decompress_usingDTable(). +`cSrcSize` must be strictly correct, otherwise decompression will fail. +FSE_decompress_usingDTable() result will tell how many bytes were regenerated (<=`dstCapacity`). +If there is an error, the function will return an error code, which can be tested using FSE_isError(). 
(ex: dst buffer too small) +*/ + +#endif /* FSE_H */ + +#if defined(FSE_STATIC_LINKING_ONLY) && !defined(FSE_H_FSE_STATIC_LINKING_ONLY) +#define FSE_H_FSE_STATIC_LINKING_ONLY + +/* *** Dependency *** */ +#include "bitstream.h" + + +/* ***************************************** +* Static allocation +*******************************************/ +/* FSE buffer bounds */ +#define FSE_NCOUNTBOUND 512 +#define FSE_BLOCKBOUND(size) (size + (size>>7)) +#define FSE_COMPRESSBOUND(size) (FSE_NCOUNTBOUND + FSE_BLOCKBOUND(size)) /* Macro version, useful for static allocation */ + +/* It is possible to statically allocate FSE CTable/DTable as a table of FSE_CTable/FSE_DTable using below macros */ +#define FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) (1 + (1<<(maxTableLog-1)) + ((maxSymbolValue+1)*2)) +#define FSE_DTABLE_SIZE_U32(maxTableLog) (1 + (1<= `1024` unsigned + */ +size_t FSE_count_wksp(unsigned* count, unsigned* maxSymbolValuePtr, + const void* source, size_t sourceSize, unsigned* workSpace); + +/** FSE_countFast() : + * same as FSE_count(), but blindly trusts that all byte values within src are <= *maxSymbolValuePtr + */ +size_t FSE_countFast(unsigned* count, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize); + +/* FSE_countFast_wksp() : + * Same as FSE_countFast(), but using an externally provided scratch buffer. + * `workSpace` must be a table of minimum `1024` unsigned + */ +size_t FSE_countFast_wksp(unsigned* count, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize, unsigned* workSpace); + +/*! FSE_count_simple() : + * Same as FSE_countFast(), but does not use any additional memory (not even on stack). + * This function is unsafe, and will segfault if any value within `src` is `> *maxSymbolValuePtr` (presuming it's also the size of `count`). 
+*/ +size_t FSE_count_simple(unsigned* count, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize); + + + +unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus); +/**< same as FSE_optimalTableLog(), which used `minus==2` */ + +/* FSE_compress_wksp() : + * Same as FSE_compress2(), but using an externally allocated scratch buffer (`workSpace`). + * FSE_WKSP_SIZE_U32() provides the minimum size required for `workSpace` as a table of FSE_CTable. + */ +#define FSE_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) ( FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) + ((maxTableLog > 12) ? (1 << (maxTableLog - 2)) : 1024) ) +size_t FSE_compress_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize); + +size_t FSE_buildCTable_raw (FSE_CTable* ct, unsigned nbBits); +/**< build a fake FSE_CTable, designed for a flat distribution, where each symbol uses nbBits */ + +size_t FSE_buildCTable_rle (FSE_CTable* ct, unsigned char symbolValue); +/**< build a fake FSE_CTable, designed to compress always the same symbolValue */ + +/* FSE_buildCTable_wksp() : + * Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`). + * `wkspSize` must be >= `(1<= BIT_DStream_completed + +When it's done, verify decompression is fully completed, by checking both DStream and the relevant states. +Checking if DStream has reached its end is performed by : + BIT_endOfDStream(&DStream); +Check also the states. There might be some symbols left there, if some high probability ones (>50%) are possible. 
+ FSE_endOfDState(&DState); +*/ + + +/* ***************************************** +* FSE unsafe API +*******************************************/ +static unsigned char FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD); +/* faster, but works only if nbBits is always >= 1 (otherwise, result will be corrupted) */ + + +/* ***************************************** +* Implementation of inlined functions +*******************************************/ +typedef struct { + int deltaFindState; + U32 deltaNbBits; +} FSE_symbolCompressionTransform; /* total 8 bytes */ + +MEM_STATIC void FSE_initCState(FSE_CState_t* statePtr, const FSE_CTable* ct) +{ + const void* ptr = ct; + const U16* u16ptr = (const U16*) ptr; + const U32 tableLog = MEM_read16(ptr); + statePtr->value = (ptrdiff_t)1<stateTable = u16ptr+2; + statePtr->symbolTT = ((const U32*)ct + 1 + (tableLog ? (1<<(tableLog-1)) : 1)); + statePtr->stateLog = tableLog; +} + + +/*! FSE_initCState2() : +* Same as FSE_initCState(), but the first symbol to include (which will be the last to be read) +* uses the smallest state value possible, saving the cost of this symbol */ +MEM_STATIC void FSE_initCState2(FSE_CState_t* statePtr, const FSE_CTable* ct, U32 symbol) +{ + FSE_initCState(statePtr, ct); + { const FSE_symbolCompressionTransform symbolTT = ((const FSE_symbolCompressionTransform*)(statePtr->symbolTT))[symbol]; + const U16* stateTable = (const U16*)(statePtr->stateTable); + U32 nbBitsOut = (U32)((symbolTT.deltaNbBits + (1<<15)) >> 16); + statePtr->value = (nbBitsOut << 16) - symbolTT.deltaNbBits; + statePtr->value = stateTable[(statePtr->value >> nbBitsOut) + symbolTT.deltaFindState]; + } +} + +MEM_STATIC void FSE_encodeSymbol(BIT_CStream_t* bitC, FSE_CState_t* statePtr, U32 symbol) +{ + FSE_symbolCompressionTransform const symbolTT = ((const FSE_symbolCompressionTransform*)(statePtr->symbolTT))[symbol]; + const U16* const stateTable = (const U16*)(statePtr->stateTable); + U32 const nbBitsOut = 
(U32)((statePtr->value + symbolTT.deltaNbBits) >> 16); + BIT_addBits(bitC, statePtr->value, nbBitsOut); + statePtr->value = stateTable[ (statePtr->value >> nbBitsOut) + symbolTT.deltaFindState]; +} + +MEM_STATIC void FSE_flushCState(BIT_CStream_t* bitC, const FSE_CState_t* statePtr) +{ + BIT_addBits(bitC, statePtr->value, statePtr->stateLog); + BIT_flushBits(bitC); +} + + +/* ====== Decompression ====== */ + +typedef struct { + U16 tableLog; + U16 fastMode; +} FSE_DTableHeader; /* sizeof U32 */ + +typedef struct +{ + unsigned short newState; + unsigned char symbol; + unsigned char nbBits; +} FSE_decode_t; /* size == U32 */ + +MEM_STATIC void FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt) +{ + const void* ptr = dt; + const FSE_DTableHeader* const DTableH = (const FSE_DTableHeader*)ptr; + DStatePtr->state = BIT_readBits(bitD, DTableH->tableLog); + BIT_reloadDStream(bitD); + DStatePtr->table = dt + 1; +} + +MEM_STATIC BYTE FSE_peekSymbol(const FSE_DState_t* DStatePtr) +{ + FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state]; + return DInfo.symbol; +} + +MEM_STATIC void FSE_updateState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD) +{ + FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state]; + U32 const nbBits = DInfo.nbBits; + size_t const lowBits = BIT_readBits(bitD, nbBits); + DStatePtr->state = DInfo.newState + lowBits; +} + +MEM_STATIC BYTE FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD) +{ + FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state]; + U32 const nbBits = DInfo.nbBits; + BYTE const symbol = DInfo.symbol; + size_t const lowBits = BIT_readBits(bitD, nbBits); + + DStatePtr->state = DInfo.newState + lowBits; + return symbol; +} + +/*! 
FSE_decodeSymbolFast() : + unsafe, only works if no symbol has a probability > 50% */ +MEM_STATIC BYTE FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD) +{ + FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state]; + U32 const nbBits = DInfo.nbBits; + BYTE const symbol = DInfo.symbol; + size_t const lowBits = BIT_readBitsFast(bitD, nbBits); + + DStatePtr->state = DInfo.newState + lowBits; + return symbol; +} + +MEM_STATIC unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr) +{ + return DStatePtr->state == 0; +} + + + +#ifndef FSE_COMMONDEFS_ONLY + +/* ************************************************************** +* Tuning parameters +****************************************************************/ +/*!MEMORY_USAGE : +* Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) +* Increasing memory usage improves compression ratio +* Reduced memory usage can improve speed, due to cache effect +* Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ +#ifndef FSE_MAX_MEMORY_USAGE +# define FSE_MAX_MEMORY_USAGE 14 +#endif +#ifndef FSE_DEFAULT_MEMORY_USAGE +# define FSE_DEFAULT_MEMORY_USAGE 13 +#endif + +/*!FSE_MAX_SYMBOL_VALUE : +* Maximum symbol value authorized. 
+* Required for proper stack allocation */ +#ifndef FSE_MAX_SYMBOL_VALUE +# define FSE_MAX_SYMBOL_VALUE 255 +#endif + +/* ************************************************************** +* template functions type & suffix +****************************************************************/ +#define FSE_FUNCTION_TYPE BYTE +#define FSE_FUNCTION_EXTENSION +#define FSE_DECODE_TYPE FSE_decode_t + + +#endif /* !FSE_COMMONDEFS_ONLY */ + + +/* *************************************************************** +* Constants +*****************************************************************/ +#define FSE_MAX_TABLELOG (FSE_MAX_MEMORY_USAGE-2) +#define FSE_MAX_TABLESIZE (1U< FSE_TABLELOG_ABSOLUTE_MAX +# error "FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX is not supported" +#endif + +#define FSE_TABLESTEP(tableSize) ((tableSize>>1) + (tableSize>>3) + 3) + + +#endif /* FSE_STATIC_LINKING_ONLY */ + + +#if defined (__cplusplus) +} +#endif diff --git a/vendor/github.com/DataDog/zstd/fse_compress.c b/vendor/github.com/DataDog/zstd/fse_compress.c new file mode 100644 index 00000000000..cb8f1fa3233 --- /dev/null +++ b/vendor/github.com/DataDog/zstd/fse_compress.c @@ -0,0 +1,849 @@ +/* ****************************************************************** + FSE : Finite State Entropy encoder + Copyright (C) 2013-2015, Yann Collet. + + BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + You can contact the author at : + - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy + - Public forum : https://groups.google.com/forum/#!forum/lz4c +****************************************************************** */ + +/* ************************************************************** +* Includes +****************************************************************/ +#include /* malloc, free, qsort */ +#include /* memcpy, memset */ +#include /* printf (debug) */ +#include "bitstream.h" +#include "compiler.h" +#define FSE_STATIC_LINKING_ONLY +#include "fse.h" +#include "error_private.h" + + +/* ************************************************************** +* Error Management +****************************************************************/ +#define FSE_isError ERR_isError +#define FSE_STATIC_ASSERT(c) { enum { FSE_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */ + + +/* ************************************************************** +* Templates +****************************************************************/ +/* + designed to be included + for type-specific functions (template emulation in C) + Objective is to write these functions 
only once, for improved maintenance +*/ + +/* safety checks */ +#ifndef FSE_FUNCTION_EXTENSION +# error "FSE_FUNCTION_EXTENSION must be defined" +#endif +#ifndef FSE_FUNCTION_TYPE +# error "FSE_FUNCTION_TYPE must be defined" +#endif + +/* Function names */ +#define FSE_CAT(X,Y) X##Y +#define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y) +#define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y) + + +/* Function templates */ + +/* FSE_buildCTable_wksp() : + * Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`). + * wkspSize should be sized to handle worst case situation, which is `1<>1 : 1) ; + FSE_symbolCompressionTransform* const symbolTT = (FSE_symbolCompressionTransform*) (FSCT); + U32 const step = FSE_TABLESTEP(tableSize); + U32 cumul[FSE_MAX_SYMBOL_VALUE+2]; + + FSE_FUNCTION_TYPE* const tableSymbol = (FSE_FUNCTION_TYPE*)workSpace; + U32 highThreshold = tableSize-1; + + /* CTable header */ + if (((size_t)1 << tableLog) * sizeof(FSE_FUNCTION_TYPE) > wkspSize) return ERROR(tableLog_tooLarge); + tableU16[-2] = (U16) tableLog; + tableU16[-1] = (U16) maxSymbolValue; + + /* For explanations on how to distribute symbol values over the table : + * http://fastcompression.blogspot.fr/2014/02/fse-distributing-symbol-values.html */ + + /* symbol start positions */ + { U32 u; + cumul[0] = 0; + for (u=1; u<=maxSymbolValue+1; u++) { + if (normalizedCounter[u-1]==-1) { /* Low proba symbol */ + cumul[u] = cumul[u-1] + 1; + tableSymbol[highThreshold--] = (FSE_FUNCTION_TYPE)(u-1); + } else { + cumul[u] = cumul[u-1] + normalizedCounter[u-1]; + } } + cumul[maxSymbolValue+1] = tableSize+1; + } + + /* Spread symbols */ + { U32 position = 0; + U32 symbol; + for (symbol=0; symbol<=maxSymbolValue; symbol++) { + int nbOccurences; + for (nbOccurences=0; nbOccurences highThreshold) position = (position + step) & tableMask; /* Low proba area */ + } } + + if (position!=0) return ERROR(GENERIC); /* Must have gone through all positions */ + } + + /* Build table */ + { U32 u; for 
(u=0; u> 3) + 3; + return maxSymbolValue ? maxHeaderSize : FSE_NCOUNTBOUND; /* maxSymbolValue==0 ? use default */ +} + +static size_t FSE_writeNCount_generic (void* header, size_t headerBufferSize, + const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, + unsigned writeIsSafe) +{ + BYTE* const ostart = (BYTE*) header; + BYTE* out = ostart; + BYTE* const oend = ostart + headerBufferSize; + int nbBits; + const int tableSize = 1 << tableLog; + int remaining; + int threshold; + U32 bitStream; + int bitCount; + unsigned charnum = 0; + int previous0 = 0; + + bitStream = 0; + bitCount = 0; + /* Table Size */ + bitStream += (tableLog-FSE_MIN_TABLELOG) << bitCount; + bitCount += 4; + + /* Init */ + remaining = tableSize+1; /* +1 for extra accuracy */ + threshold = tableSize; + nbBits = tableLog+1; + + while (remaining>1) { /* stops at 1 */ + if (previous0) { + unsigned start = charnum; + while (!normalizedCounter[charnum]) charnum++; + while (charnum >= start+24) { + start+=24; + bitStream += 0xFFFFU << bitCount; + if ((!writeIsSafe) && (out > oend-2)) return ERROR(dstSize_tooSmall); /* Buffer overflow */ + out[0] = (BYTE) bitStream; + out[1] = (BYTE)(bitStream>>8); + out+=2; + bitStream>>=16; + } + while (charnum >= start+3) { + start+=3; + bitStream += 3 << bitCount; + bitCount += 2; + } + bitStream += (charnum-start) << bitCount; + bitCount += 2; + if (bitCount>16) { + if ((!writeIsSafe) && (out > oend - 2)) return ERROR(dstSize_tooSmall); /* Buffer overflow */ + out[0] = (BYTE)bitStream; + out[1] = (BYTE)(bitStream>>8); + out += 2; + bitStream >>= 16; + bitCount -= 16; + } } + { int count = normalizedCounter[charnum++]; + int const max = (2*threshold-1)-remaining; + remaining -= count < 0 ? -count : count; + count++; /* +1 for extra accuracy */ + if (count>=threshold) count += max; /* [0..max[ [max..threshold[ (...) 
[threshold+max 2*threshold[ */ + bitStream += count << bitCount; + bitCount += nbBits; + bitCount -= (count>=1; } + } + if (bitCount>16) { + if ((!writeIsSafe) && (out > oend - 2)) return ERROR(dstSize_tooSmall); /* Buffer overflow */ + out[0] = (BYTE)bitStream; + out[1] = (BYTE)(bitStream>>8); + out += 2; + bitStream >>= 16; + bitCount -= 16; + } } + + /* flush remaining bitStream */ + if ((!writeIsSafe) && (out > oend - 2)) return ERROR(dstSize_tooSmall); /* Buffer overflow */ + out[0] = (BYTE)bitStream; + out[1] = (BYTE)(bitStream>>8); + out+= (bitCount+7) /8; + + if (charnum > maxSymbolValue + 1) return ERROR(GENERIC); + + return (out-ostart); +} + + +size_t FSE_writeNCount (void* buffer, size_t bufferSize, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog) +{ + if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge); /* Unsupported */ + if (tableLog < FSE_MIN_TABLELOG) return ERROR(GENERIC); /* Unsupported */ + + if (bufferSize < FSE_NCountWriteBound(maxSymbolValue, tableLog)) + return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 0); + + return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 1); +} + + + +/*-************************************************************** +* Counting histogram +****************************************************************/ +/*! FSE_count_simple + This function counts byte values within `src`, and store the histogram into table `count`. + It doesn't use any additional memory. + But this function is unsafe : it doesn't check that all values within `src` can fit into `count`. + For this reason, prefer using a table `count` with 256 elements. + @return : count of most numerous element. 
+*/ +size_t FSE_count_simple(unsigned* count, unsigned* maxSymbolValuePtr, + const void* src, size_t srcSize) +{ + const BYTE* ip = (const BYTE*)src; + const BYTE* const end = ip + srcSize; + unsigned maxSymbolValue = *maxSymbolValuePtr; + unsigned max=0; + + memset(count, 0, (maxSymbolValue+1)*sizeof(*count)); + if (srcSize==0) { *maxSymbolValuePtr = 0; return 0; } + + while (ip max) max = count[s]; } + + return (size_t)max; +} + + +/* FSE_count_parallel_wksp() : + * Same as FSE_count_parallel(), but using an externally provided scratch buffer. + * `workSpace` size must be a minimum of `1024 * sizeof(unsigned)`. + * @return : largest histogram frequency, or an error code (notably when histogram would be larger than *maxSymbolValuePtr). */ +static size_t FSE_count_parallel_wksp( + unsigned* count, unsigned* maxSymbolValuePtr, + const void* source, size_t sourceSize, + unsigned checkMax, unsigned* const workSpace) +{ + const BYTE* ip = (const BYTE*)source; + const BYTE* const iend = ip+sourceSize; + unsigned maxSymbolValue = *maxSymbolValuePtr; + unsigned max=0; + U32* const Counting1 = workSpace; + U32* const Counting2 = Counting1 + 256; + U32* const Counting3 = Counting2 + 256; + U32* const Counting4 = Counting3 + 256; + + memset(workSpace, 0, 4*256*sizeof(unsigned)); + + /* safety checks */ + if (!sourceSize) { + memset(count, 0, maxSymbolValue + 1); + *maxSymbolValuePtr = 0; + return 0; + } + if (!maxSymbolValue) maxSymbolValue = 255; /* 0 == default */ + + /* by stripes of 16 bytes */ + { U32 cached = MEM_read32(ip); ip += 4; + while (ip < iend-15) { + U32 c = cached; cached = MEM_read32(ip); ip += 4; + Counting1[(BYTE) c ]++; + Counting2[(BYTE)(c>>8) ]++; + Counting3[(BYTE)(c>>16)]++; + Counting4[ c>>24 ]++; + c = cached; cached = MEM_read32(ip); ip += 4; + Counting1[(BYTE) c ]++; + Counting2[(BYTE)(c>>8) ]++; + Counting3[(BYTE)(c>>16)]++; + Counting4[ c>>24 ]++; + c = cached; cached = MEM_read32(ip); ip += 4; + Counting1[(BYTE) c ]++; + Counting2[(BYTE)(c>>8) 
]++; + Counting3[(BYTE)(c>>16)]++; + Counting4[ c>>24 ]++; + c = cached; cached = MEM_read32(ip); ip += 4; + Counting1[(BYTE) c ]++; + Counting2[(BYTE)(c>>8) ]++; + Counting3[(BYTE)(c>>16)]++; + Counting4[ c>>24 ]++; + } + ip-=4; + } + + /* finish last symbols */ + while (ipmaxSymbolValue; s--) { + Counting1[s] += Counting2[s] + Counting3[s] + Counting4[s]; + if (Counting1[s]) return ERROR(maxSymbolValue_tooSmall); + } } + + { U32 s; + if (maxSymbolValue > 255) maxSymbolValue = 255; + for (s=0; s<=maxSymbolValue; s++) { + count[s] = Counting1[s] + Counting2[s] + Counting3[s] + Counting4[s]; + if (count[s] > max) max = count[s]; + } } + + while (!count[maxSymbolValue]) maxSymbolValue--; + *maxSymbolValuePtr = maxSymbolValue; + return (size_t)max; +} + +/* FSE_countFast_wksp() : + * Same as FSE_countFast(), but using an externally provided scratch buffer. + * `workSpace` size must be table of >= `1024` unsigned */ +size_t FSE_countFast_wksp(unsigned* count, unsigned* maxSymbolValuePtr, + const void* source, size_t sourceSize, + unsigned* workSpace) +{ + if (sourceSize < 1500) /* heuristic threshold */ + return FSE_count_simple(count, maxSymbolValuePtr, source, sourceSize); + return FSE_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, 0, workSpace); +} + +/* fast variant (unsafe : won't check if src contains values beyond count[] limit) */ +size_t FSE_countFast(unsigned* count, unsigned* maxSymbolValuePtr, + const void* source, size_t sourceSize) +{ + unsigned tmpCounters[1024]; + return FSE_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, tmpCounters); +} + +/* FSE_count_wksp() : + * Same as FSE_count(), but using an externally provided scratch buffer. 
+ * `workSpace` size must be table of >= `1024` unsigned */ +size_t FSE_count_wksp(unsigned* count, unsigned* maxSymbolValuePtr, + const void* source, size_t sourceSize, unsigned* workSpace) +{ + if (*maxSymbolValuePtr < 255) + return FSE_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, 1, workSpace); + *maxSymbolValuePtr = 255; + return FSE_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, workSpace); +} + +size_t FSE_count(unsigned* count, unsigned* maxSymbolValuePtr, + const void* src, size_t srcSize) +{ + unsigned tmpCounters[1024]; + return FSE_count_wksp(count, maxSymbolValuePtr, src, srcSize, tmpCounters); +} + + + +/*-************************************************************** +* FSE Compression Code +****************************************************************/ +/*! FSE_sizeof_CTable() : + FSE_CTable is a variable size structure which contains : + `U16 tableLog;` + `U16 maxSymbolValue;` + `U16 nextStateNumber[1 << tableLog];` // This size is variable + `FSE_symbolCompressionTransform symbolTT[maxSymbolValue+1];` // This size is variable +Allocation is manual (C standard does not support variable-size structures). 
+*/ +size_t FSE_sizeof_CTable (unsigned maxSymbolValue, unsigned tableLog) +{ + if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge); + return FSE_CTABLE_SIZE_U32 (tableLog, maxSymbolValue) * sizeof(U32); +} + +FSE_CTable* FSE_createCTable (unsigned maxSymbolValue, unsigned tableLog) +{ + size_t size; + if (tableLog > FSE_TABLELOG_ABSOLUTE_MAX) tableLog = FSE_TABLELOG_ABSOLUTE_MAX; + size = FSE_CTABLE_SIZE_U32 (tableLog, maxSymbolValue) * sizeof(U32); + return (FSE_CTable*)malloc(size); +} + +void FSE_freeCTable (FSE_CTable* ct) { free(ct); } + +/* provides the minimum logSize to safely represent a distribution */ +static unsigned FSE_minTableLog(size_t srcSize, unsigned maxSymbolValue) +{ + U32 minBitsSrc = BIT_highbit32((U32)(srcSize - 1)) + 1; + U32 minBitsSymbols = BIT_highbit32(maxSymbolValue) + 2; + U32 minBits = minBitsSrc < minBitsSymbols ? minBitsSrc : minBitsSymbols; + assert(srcSize > 1); /* Not supported, RLE should be used instead */ + return minBits; +} + +unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus) +{ + U32 maxBitsSrc = BIT_highbit32((U32)(srcSize - 1)) - minus; + U32 tableLog = maxTableLog; + U32 minBits = FSE_minTableLog(srcSize, maxSymbolValue); + assert(srcSize > 1); /* Not supported, RLE should be used instead */ + if (tableLog==0) tableLog = FSE_DEFAULT_TABLELOG; + if (maxBitsSrc < tableLog) tableLog = maxBitsSrc; /* Accuracy can be reduced */ + if (minBits > tableLog) tableLog = minBits; /* Need a minimum to safely represent all symbol values */ + if (tableLog < FSE_MIN_TABLELOG) tableLog = FSE_MIN_TABLELOG; + if (tableLog > FSE_MAX_TABLELOG) tableLog = FSE_MAX_TABLELOG; + return tableLog; +} + +unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue) +{ + return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 2); +} + + +/* Secondary normalization method. + To be used when primary method fails. 
*/ + +static size_t FSE_normalizeM2(short* norm, U32 tableLog, const unsigned* count, size_t total, U32 maxSymbolValue) +{ + short const NOT_YET_ASSIGNED = -2; + U32 s; + U32 distributed = 0; + U32 ToDistribute; + + /* Init */ + U32 const lowThreshold = (U32)(total >> tableLog); + U32 lowOne = (U32)((total * 3) >> (tableLog + 1)); + + for (s=0; s<=maxSymbolValue; s++) { + if (count[s] == 0) { + norm[s]=0; + continue; + } + if (count[s] <= lowThreshold) { + norm[s] = -1; + distributed++; + total -= count[s]; + continue; + } + if (count[s] <= lowOne) { + norm[s] = 1; + distributed++; + total -= count[s]; + continue; + } + + norm[s]=NOT_YET_ASSIGNED; + } + ToDistribute = (1 << tableLog) - distributed; + + if ((total / ToDistribute) > lowOne) { + /* risk of rounding to zero */ + lowOne = (U32)((total * 3) / (ToDistribute * 2)); + for (s=0; s<=maxSymbolValue; s++) { + if ((norm[s] == NOT_YET_ASSIGNED) && (count[s] <= lowOne)) { + norm[s] = 1; + distributed++; + total -= count[s]; + continue; + } } + ToDistribute = (1 << tableLog) - distributed; + } + + if (distributed == maxSymbolValue+1) { + /* all values are pretty poor; + probably incompressible data (should have already been detected); + find max, then give all remaining points to max */ + U32 maxV = 0, maxC = 0; + for (s=0; s<=maxSymbolValue; s++) + if (count[s] > maxC) { maxV=s; maxC=count[s]; } + norm[maxV] += (short)ToDistribute; + return 0; + } + + if (total == 0) { + /* all of the symbols were low enough for the lowOne or lowThreshold */ + for (s=0; ToDistribute > 0; s = (s+1)%(maxSymbolValue+1)) + if (norm[s] > 0) { ToDistribute--; norm[s]++; } + return 0; + } + + { U64 const vStepLog = 62 - tableLog; + U64 const mid = (1ULL << (vStepLog-1)) - 1; + U64 const rStep = ((((U64)1<> vStepLog); + U32 const sEnd = (U32)(end >> vStepLog); + U32 const weight = sEnd - sStart; + if (weight < 1) + return ERROR(GENERIC); + norm[s] = (short)weight; + tmpTotal = end; + } } } + + return 0; +} + + +size_t FSE_normalizeCount 
(short* normalizedCounter, unsigned tableLog, + const unsigned* count, size_t total, + unsigned maxSymbolValue) +{ + /* Sanity checks */ + if (tableLog==0) tableLog = FSE_DEFAULT_TABLELOG; + if (tableLog < FSE_MIN_TABLELOG) return ERROR(GENERIC); /* Unsupported size */ + if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge); /* Unsupported size */ + if (tableLog < FSE_minTableLog(total, maxSymbolValue)) return ERROR(GENERIC); /* Too small tableLog, compression potentially impossible */ + + { static U32 const rtbTable[] = { 0, 473195, 504333, 520860, 550000, 700000, 750000, 830000 }; + U64 const scale = 62 - tableLog; + U64 const step = ((U64)1<<62) / total; /* <== here, one division ! */ + U64 const vStep = 1ULL<<(scale-20); + int stillToDistribute = 1<> tableLog); + + for (s=0; s<=maxSymbolValue; s++) { + if (count[s] == total) return 0; /* rle special case */ + if (count[s] == 0) { normalizedCounter[s]=0; continue; } + if (count[s] <= lowThreshold) { + normalizedCounter[s] = -1; + stillToDistribute--; + } else { + short proba = (short)((count[s]*step) >> scale); + if (proba<8) { + U64 restToBeat = vStep * rtbTable[proba]; + proba += (count[s]*step) - ((U64)proba< restToBeat; + } + if (proba > largestP) { largestP=proba; largest=s; } + normalizedCounter[s] = proba; + stillToDistribute -= proba; + } } + if (-stillToDistribute >= (normalizedCounter[largest] >> 1)) { + /* corner case, need another normalization method */ + size_t const errorCode = FSE_normalizeM2(normalizedCounter, tableLog, count, total, maxSymbolValue); + if (FSE_isError(errorCode)) return errorCode; + } + else normalizedCounter[largest] += (short)stillToDistribute; + } + +#if 0 + { /* Print Table (debug) */ + U32 s; + U32 nTotal = 0; + for (s=0; s<=maxSymbolValue; s++) + printf("%3i: %4i \n", s, normalizedCounter[s]); + for (s=0; s<=maxSymbolValue; s++) + nTotal += abs(normalizedCounter[s]); + if (nTotal != (1U<>1); /* assumption : tableLog >= 1 */ + FSE_symbolCompressionTransform* 
const symbolTT = (FSE_symbolCompressionTransform*) (FSCT); + unsigned s; + + /* Sanity checks */ + if (nbBits < 1) return ERROR(GENERIC); /* min size */ + + /* header */ + tableU16[-2] = (U16) nbBits; + tableU16[-1] = (U16) maxSymbolValue; + + /* Build table */ + for (s=0; s FSE_MAX_TABLELOG*4+7 ) && (srcSize & 2)) { /* test bit 2 */ + FSE_encodeSymbol(&bitC, &CState2, *--ip); + FSE_encodeSymbol(&bitC, &CState1, *--ip); + FSE_FLUSHBITS(&bitC); + } + + /* 2 or 4 encoding per loop */ + while ( ip>istart ) { + + FSE_encodeSymbol(&bitC, &CState2, *--ip); + + if (sizeof(bitC.bitContainer)*8 < FSE_MAX_TABLELOG*2+7 ) /* this test must be static */ + FSE_FLUSHBITS(&bitC); + + FSE_encodeSymbol(&bitC, &CState1, *--ip); + + if (sizeof(bitC.bitContainer)*8 > FSE_MAX_TABLELOG*4+7 ) { /* this test must be static */ + FSE_encodeSymbol(&bitC, &CState2, *--ip); + FSE_encodeSymbol(&bitC, &CState1, *--ip); + } + + FSE_FLUSHBITS(&bitC); + } + + FSE_flushCState(&bitC, &CState2); + FSE_flushCState(&bitC, &CState1); + return BIT_closeCStream(&bitC); +} + +size_t FSE_compress_usingCTable (void* dst, size_t dstSize, + const void* src, size_t srcSize, + const FSE_CTable* ct) +{ + unsigned const fast = (dstSize >= FSE_BLOCKBOUND(srcSize)); + + if (fast) + return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 1); + else + return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 0); +} + + +size_t FSE_compressBound(size_t size) { return FSE_COMPRESSBOUND(size); } + +#define CHECK_V_F(e, f) size_t const e = f; if (ERR_isError(e)) return e +#define CHECK_F(f) { CHECK_V_F(_var_err__, f); } + +/* FSE_compress_wksp() : + * Same as FSE_compress2(), but using an externally allocated scratch buffer (`workSpace`). 
+ * `wkspSize` size must be `(1< not compressible */ + if (maxCount < (srcSize >> 7)) return 0; /* Heuristic : not compressible enough */ + } + + tableLog = FSE_optimalTableLog(tableLog, srcSize, maxSymbolValue); + CHECK_F( FSE_normalizeCount(norm, tableLog, count, srcSize, maxSymbolValue) ); + + /* Write table description header */ + { CHECK_V_F(nc_err, FSE_writeNCount(op, oend-op, norm, maxSymbolValue, tableLog) ); + op += nc_err; + } + + /* Compress */ + CHECK_F( FSE_buildCTable_wksp(CTable, norm, maxSymbolValue, tableLog, scratchBuffer, scratchBufferSize) ); + { CHECK_V_F(cSize, FSE_compress_usingCTable(op, oend - op, src, srcSize, CTable) ); + if (cSize == 0) return 0; /* not enough space for compressed data */ + op += cSize; + } + + /* check compressibility */ + if ( (size_t)(op-ostart) >= srcSize-1 ) return 0; + + return op-ostart; +} + +typedef struct { + FSE_CTable CTable_max[FSE_CTABLE_SIZE_U32(FSE_MAX_TABLELOG, FSE_MAX_SYMBOL_VALUE)]; + BYTE scratchBuffer[1 << FSE_MAX_TABLELOG]; +} fseWkspMax_t; + +size_t FSE_compress2 (void* dst, size_t dstCapacity, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog) +{ + fseWkspMax_t scratchBuffer; + FSE_STATIC_ASSERT(sizeof(scratchBuffer) >= FSE_WKSP_SIZE_U32(FSE_MAX_TABLELOG, FSE_MAX_SYMBOL_VALUE)); /* compilation failures here means scratchBuffer is not large enough */ + if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge); + return FSE_compress_wksp(dst, dstCapacity, src, srcSize, maxSymbolValue, tableLog, &scratchBuffer, sizeof(scratchBuffer)); +} + +size_t FSE_compress (void* dst, size_t dstCapacity, const void* src, size_t srcSize) +{ + return FSE_compress2(dst, dstCapacity, src, srcSize, FSE_MAX_SYMBOL_VALUE, FSE_DEFAULT_TABLELOG); +} + + +#endif /* FSE_COMMONDEFS_ONLY */ diff --git a/vendor/github.com/DataDog/zstd/fse_decompress.c b/vendor/github.com/DataDog/zstd/fse_decompress.c new file mode 100644 index 00000000000..4c66c3b7746 --- /dev/null +++ 
b/vendor/github.com/DataDog/zstd/fse_decompress.c @@ -0,0 +1,309 @@ +/* ****************************************************************** + FSE : Finite State Entropy decoder + Copyright (C) 2013-2015, Yann Collet. + + BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ + You can contact the author at : + - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy + - Public forum : https://groups.google.com/forum/#!forum/lz4c +****************************************************************** */ + + +/* ************************************************************** +* Includes +****************************************************************/ +#include /* malloc, free, qsort */ +#include /* memcpy, memset */ +#include "bitstream.h" +#include "compiler.h" +#define FSE_STATIC_LINKING_ONLY +#include "fse.h" +#include "error_private.h" + + +/* ************************************************************** +* Error Management +****************************************************************/ +#define FSE_isError ERR_isError +#define FSE_STATIC_ASSERT(c) { enum { FSE_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */ + +/* check and forward error code */ +#define CHECK_F(f) { size_t const e = f; if (FSE_isError(e)) return e; } + + +/* ************************************************************** +* Templates +****************************************************************/ +/* + designed to be included + for type-specific functions (template emulation in C) + Objective is to write these functions only once, for improved maintenance +*/ + +/* safety checks */ +#ifndef FSE_FUNCTION_EXTENSION +# error "FSE_FUNCTION_EXTENSION must be defined" +#endif +#ifndef FSE_FUNCTION_TYPE +# error "FSE_FUNCTION_TYPE must be defined" +#endif + +/* Function names */ +#define FSE_CAT(X,Y) X##Y +#define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y) +#define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y) + + +/* Function templates */ +FSE_DTable* FSE_createDTable (unsigned tableLog) +{ + if (tableLog > FSE_TABLELOG_ABSOLUTE_MAX) tableLog = FSE_TABLELOG_ABSOLUTE_MAX; + return (FSE_DTable*)malloc( FSE_DTABLE_SIZE_U32(tableLog) * sizeof (U32) ); +} + +void FSE_freeDTable (FSE_DTable* dt) +{ + free(dt); +} + +size_t 
FSE_buildDTable(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog) +{ + void* const tdPtr = dt+1; /* because *dt is unsigned, 32-bits aligned on 32-bits */ + FSE_DECODE_TYPE* const tableDecode = (FSE_DECODE_TYPE*) (tdPtr); + U16 symbolNext[FSE_MAX_SYMBOL_VALUE+1]; + + U32 const maxSV1 = maxSymbolValue + 1; + U32 const tableSize = 1 << tableLog; + U32 highThreshold = tableSize-1; + + /* Sanity Checks */ + if (maxSymbolValue > FSE_MAX_SYMBOL_VALUE) return ERROR(maxSymbolValue_tooLarge); + if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge); + + /* Init, lay down lowprob symbols */ + { FSE_DTableHeader DTableH; + DTableH.tableLog = (U16)tableLog; + DTableH.fastMode = 1; + { S16 const largeLimit= (S16)(1 << (tableLog-1)); + U32 s; + for (s=0; s= largeLimit) DTableH.fastMode=0; + symbolNext[s] = normalizedCounter[s]; + } } } + memcpy(dt, &DTableH, sizeof(DTableH)); + } + + /* Spread symbols */ + { U32 const tableMask = tableSize-1; + U32 const step = FSE_TABLESTEP(tableSize); + U32 s, position = 0; + for (s=0; s highThreshold) position = (position + step) & tableMask; /* lowprob area */ + } } + if (position!=0) return ERROR(GENERIC); /* position must reach all cells once, otherwise normalizedCounter is incorrect */ + } + + /* Build Decoding table */ + { U32 u; + for (u=0; utableLog = 0; + DTableH->fastMode = 0; + + cell->newState = 0; + cell->symbol = symbolValue; + cell->nbBits = 0; + + return 0; +} + + +size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits) +{ + void* ptr = dt; + FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr; + void* dPtr = dt + 1; + FSE_decode_t* const dinfo = (FSE_decode_t*)dPtr; + const unsigned tableSize = 1 << nbBits; + const unsigned tableMask = tableSize - 1; + const unsigned maxSV1 = tableMask+1; + unsigned s; + + /* Sanity checks */ + if (nbBits < 1) return ERROR(GENERIC); /* min size */ + + /* Build Decoding Table */ + DTableH->tableLog = (U16)nbBits; + DTableH->fastMode 
= 1; + for (s=0; s sizeof(bitD.bitContainer)*8) /* This test must be static */ + BIT_reloadDStream(&bitD); + + op[1] = FSE_GETSYMBOL(&state2); + + if (FSE_MAX_TABLELOG*4+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */ + { if (BIT_reloadDStream(&bitD) > BIT_DStream_unfinished) { op+=2; break; } } + + op[2] = FSE_GETSYMBOL(&state1); + + if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */ + BIT_reloadDStream(&bitD); + + op[3] = FSE_GETSYMBOL(&state2); + } + + /* tail */ + /* note : BIT_reloadDStream(&bitD) >= FSE_DStream_partiallyFilled; Ends at exactly BIT_DStream_completed */ + while (1) { + if (op>(omax-2)) return ERROR(dstSize_tooSmall); + *op++ = FSE_GETSYMBOL(&state1); + if (BIT_reloadDStream(&bitD)==BIT_DStream_overflow) { + *op++ = FSE_GETSYMBOL(&state2); + break; + } + + if (op>(omax-2)) return ERROR(dstSize_tooSmall); + *op++ = FSE_GETSYMBOL(&state2); + if (BIT_reloadDStream(&bitD)==BIT_DStream_overflow) { + *op++ = FSE_GETSYMBOL(&state1); + break; + } } + + return op-ostart; +} + + +size_t FSE_decompress_usingDTable(void* dst, size_t originalSize, + const void* cSrc, size_t cSrcSize, + const FSE_DTable* dt) +{ + const void* ptr = dt; + const FSE_DTableHeader* DTableH = (const FSE_DTableHeader*)ptr; + const U32 fastMode = DTableH->fastMode; + + /* select fast mode (static) */ + if (fastMode) return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1); + return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0); +} + + +size_t FSE_decompress_wksp(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, FSE_DTable* workSpace, unsigned maxLog) +{ + const BYTE* const istart = (const BYTE*)cSrc; + const BYTE* ip = istart; + short counting[FSE_MAX_SYMBOL_VALUE+1]; + unsigned tableLog; + unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE; + + /* normal FSE decoding mode */ + size_t const NCountLength = FSE_readNCount (counting, &maxSymbolValue, &tableLog, 
istart, cSrcSize); + if (FSE_isError(NCountLength)) return NCountLength; + //if (NCountLength >= cSrcSize) return ERROR(srcSize_wrong); /* too small input size; supposed to be already checked in NCountLength, only remaining case : NCountLength==cSrcSize */ + if (tableLog > maxLog) return ERROR(tableLog_tooLarge); + ip += NCountLength; + cSrcSize -= NCountLength; + + CHECK_F( FSE_buildDTable (workSpace, counting, maxSymbolValue, tableLog) ); + + return FSE_decompress_usingDTable (dst, dstCapacity, ip, cSrcSize, workSpace); /* always return, even if it is an error code */ +} + + +typedef FSE_DTable DTable_max_t[FSE_DTABLE_SIZE_U32(FSE_MAX_TABLELOG)]; + +size_t FSE_decompress(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize) +{ + DTable_max_t dt; /* Static analyzer seems unable to understand this table will be properly initialized later */ + return FSE_decompress_wksp(dst, dstCapacity, cSrc, cSrcSize, dt, FSE_MAX_TABLELOG); +} + + + +#endif /* FSE_COMMONDEFS_ONLY */ diff --git a/vendor/github.com/DataDog/zstd/huf.h b/vendor/github.com/DataDog/zstd/huf.h new file mode 100644 index 00000000000..b4645b4e519 --- /dev/null +++ b/vendor/github.com/DataDog/zstd/huf.h @@ -0,0 +1,327 @@ +/* ****************************************************************** + Huffman coder, part of New Generation Entropy library + header file + Copyright (C) 2013-2016, Yann Collet. + + BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + You can contact the author at : + - Source repository : https://github.com/Cyan4973/FiniteStateEntropy +****************************************************************** */ + +#if defined (__cplusplus) +extern "C" { +#endif + +#ifndef HUF_H_298734234 +#define HUF_H_298734234 + +/* *** Dependencies *** */ +#include /* size_t */ + + +/* *** library symbols visibility *** */ +/* Note : when linking with -fvisibility=hidden on gcc, or by default on Visual, + * HUF symbols remain "private" (internal symbols for library only). 
+ * Set macro FSE_DLL_EXPORT to 1 if you want HUF symbols visible on DLL interface */ +#if defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) && defined(__GNUC__) && (__GNUC__ >= 4) +# define HUF_PUBLIC_API __attribute__ ((visibility ("default"))) +#elif defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) /* Visual expected */ +# define HUF_PUBLIC_API __declspec(dllexport) +#elif defined(FSE_DLL_IMPORT) && (FSE_DLL_IMPORT==1) +# define HUF_PUBLIC_API __declspec(dllimport) /* not required, just to generate faster code (saves a function pointer load from IAT and an indirect jump) */ +#else +# define HUF_PUBLIC_API +#endif + + +/* ========================== */ +/* *** simple functions *** */ +/* ========================== */ + +/** HUF_compress() : + * Compress content from buffer 'src', of size 'srcSize', into buffer 'dst'. + * 'dst' buffer must be already allocated. + * Compression runs faster if `dstCapacity` >= HUF_compressBound(srcSize). + * `srcSize` must be <= `HUF_BLOCKSIZE_MAX` == 128 KB. + * @return : size of compressed data (<= `dstCapacity`). + * Special values : if return == 0, srcData is not compressible => Nothing is stored within dst !!! + * if HUF_isError(return), compression failed (more details using HUF_getErrorName()) + */ +HUF_PUBLIC_API size_t HUF_compress(void* dst, size_t dstCapacity, + const void* src, size_t srcSize); + +/** HUF_decompress() : + * Decompress HUF data from buffer 'cSrc', of size 'cSrcSize', + * into already allocated buffer 'dst', of minimum size 'dstSize'. + * `originalSize` : **must** be the ***exact*** size of original (uncompressed) data. + * Note : in contrast with FSE, HUF_decompress can regenerate + * RLE (cSrcSize==1) and uncompressed (cSrcSize==dstSize) data, + * because it knows size to regenerate (originalSize). 
+ * @return : size of regenerated data (== originalSize), + * or an error code, which can be tested using HUF_isError() + */ +HUF_PUBLIC_API size_t HUF_decompress(void* dst, size_t originalSize, + const void* cSrc, size_t cSrcSize); + + +/* *** Tool functions *** */ +#define HUF_BLOCKSIZE_MAX (128 * 1024) /**< maximum input size for a single block compressed with HUF_compress */ +HUF_PUBLIC_API size_t HUF_compressBound(size_t size); /**< maximum compressed size (worst case) */ + +/* Error Management */ +HUF_PUBLIC_API unsigned HUF_isError(size_t code); /**< tells if a return value is an error code */ +HUF_PUBLIC_API const char* HUF_getErrorName(size_t code); /**< provides error code string (useful for debugging) */ + + +/* *** Advanced function *** */ + +/** HUF_compress2() : + * Same as HUF_compress(), but offers control over `maxSymbolValue` and `tableLog`. + * `maxSymbolValue` must be <= HUF_SYMBOLVALUE_MAX . + * `tableLog` must be `<= HUF_TABLELOG_MAX` . */ +HUF_PUBLIC_API size_t HUF_compress2 (void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + unsigned maxSymbolValue, unsigned tableLog); + +/** HUF_compress4X_wksp() : + * Same as HUF_compress2(), but uses externally allocated `workSpace`. + * `workspace` must have minimum alignment of 4, and be at least as large as HUF_WORKSPACE_SIZE */ +#define HUF_WORKSPACE_SIZE (6 << 10) +#define HUF_WORKSPACE_SIZE_U32 (HUF_WORKSPACE_SIZE / sizeof(U32)) +HUF_PUBLIC_API size_t HUF_compress4X_wksp (void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + unsigned maxSymbolValue, unsigned tableLog, + void* workSpace, size_t wkspSize); + +#endif /* HUF_H_298734234 */ + +/* ****************************************************************** + * WARNING !! + * The following section contains advanced and experimental definitions + * which shall never be used in the context of a dynamic library, + * because they are not guaranteed to remain stable in the future. 
+ * Only consider them in association with static linking. + * *****************************************************************/ +#if defined(HUF_STATIC_LINKING_ONLY) && !defined(HUF_H_HUF_STATIC_LINKING_ONLY) +#define HUF_H_HUF_STATIC_LINKING_ONLY + +/* *** Dependencies *** */ +#include "mem.h" /* U32 */ + + +/* *** Constants *** */ +#define HUF_TABLELOG_MAX 12 /* max runtime value of tableLog (due to static allocation); can be modified up to HUF_ABSOLUTEMAX_TABLELOG */ +#define HUF_TABLELOG_DEFAULT 11 /* default tableLog value when none specified */ +#define HUF_SYMBOLVALUE_MAX 255 + +#define HUF_TABLELOG_ABSOLUTEMAX 15 /* absolute limit of HUF_MAX_TABLELOG. Beyond that value, code does not work */ +#if (HUF_TABLELOG_MAX > HUF_TABLELOG_ABSOLUTEMAX) +# error "HUF_TABLELOG_MAX is too large !" +#endif + + +/* **************************************** +* Static allocation +******************************************/ +/* HUF buffer bounds */ +#define HUF_CTABLEBOUND 129 +#define HUF_BLOCKBOUND(size) (size + (size>>8) + 8) /* only true when incompressible is pre-filtered with fast heuristic */ +#define HUF_COMPRESSBOUND(size) (HUF_CTABLEBOUND + HUF_BLOCKBOUND(size)) /* Macro version, useful for static allocation */ + +/* static allocation of HUF's Compression Table */ +#define HUF_CTABLE_SIZE_U32(maxSymbolValue) ((maxSymbolValue)+1) /* Use tables of U32, for proper alignment */ +#define HUF_CTABLE_SIZE(maxSymbolValue) (HUF_CTABLE_SIZE_U32(maxSymbolValue) * sizeof(U32)) +#define HUF_CREATE_STATIC_CTABLE(name, maxSymbolValue) \ + U32 name##hb[HUF_CTABLE_SIZE_U32(maxSymbolValue)]; \ + void* name##hv = &(name##hb); \ + HUF_CElt* name = (HUF_CElt*)(name##hv) /* no final ; */ + +/* static allocation of HUF's DTable */ +typedef U32 HUF_DTable; +#define HUF_DTABLE_SIZE(maxTableLog) (1 + (1<<(maxTableLog))) +#define HUF_CREATE_STATIC_DTABLEX2(DTable, maxTableLog) \ + HUF_DTable DTable[HUF_DTABLE_SIZE((maxTableLog)-1)] = { ((U32)((maxTableLog)-1) * 0x01000001) } +#define 
HUF_CREATE_STATIC_DTABLEX4(DTable, maxTableLog) \ + HUF_DTable DTable[HUF_DTABLE_SIZE(maxTableLog)] = { ((U32)(maxTableLog) * 0x01000001) } + + +/* **************************************** +* Advanced decompression functions +******************************************/ +size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< single-symbol decoder */ +size_t HUF_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< double-symbols decoder */ + +size_t HUF_decompress4X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< decodes RLE and uncompressed */ +size_t HUF_decompress4X_hufOnly(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< considers RLE and uncompressed as errors */ +size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< considers RLE and uncompressed as errors */ +size_t HUF_decompress4X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< single-symbol decoder */ +size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< single-symbol decoder */ +size_t HUF_decompress4X4_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< double-symbols decoder */ +size_t HUF_decompress4X4_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< double-symbols decoder */ + + +/* **************************************** + * HUF detailed API + * ****************************************/ + +/*! HUF_compress() does the following: + * 1. count symbol occurrence from source[] into table count[] using FSE_count() (exposed within "fse.h") + * 2. 
(optional) refine tableLog using HUF_optimalTableLog() + * 3. build Huffman table from count using HUF_buildCTable() + * 4. save Huffman table to memory buffer using HUF_writeCTable() + * 5. encode the data stream using HUF_compress4X_usingCTable() + * + * The following API allows targeting specific sub-functions for advanced tasks. + * For example, it's possible to compress several blocks using the same 'CTable', + * or to save and regenerate 'CTable' using external methods. + */ +unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue); +typedef struct HUF_CElt_s HUF_CElt; /* incomplete type */ +size_t HUF_buildCTable (HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue, unsigned maxNbBits); /* @return : maxNbBits; CTable and count can overlap. In which case, CTable will overwrite count content */ +size_t HUF_writeCTable (void* dst, size_t maxDstSize, const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog); +size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable); + +typedef enum { + HUF_repeat_none, /**< Cannot use the previous table */ + HUF_repeat_check, /**< Can use the previous table but it must be checked. Note : The previous table must have been constructed by HUF_compress{1, 4}X_repeat */ + HUF_repeat_valid /**< Can use the previous table and it is asumed to be valid */ + } HUF_repeat; +/** HUF_compress4X_repeat() : + * Same as HUF_compress4X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none. + * If it uses hufTable it does not modify hufTable or repeat. + * If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used. + * If preferRepeat then the old table will always be used if valid. 
*/ +size_t HUF_compress4X_repeat(void* dst, size_t dstSize, + const void* src, size_t srcSize, + unsigned maxSymbolValue, unsigned tableLog, + void* workSpace, size_t wkspSize, /**< `workSpace` must be aligned on 4-bytes boundaries, `wkspSize` must be >= HUF_WORKSPACE_SIZE */ + HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2); + +/** HUF_buildCTable_wksp() : + * Same as HUF_buildCTable(), but using externally allocated scratch buffer. + * `workSpace` must be aligned on 4-bytes boundaries, and its size must be >= HUF_CTABLE_WORKSPACE_SIZE. + */ +#define HUF_CTABLE_WORKSPACE_SIZE_U32 (2*HUF_SYMBOLVALUE_MAX +1 +1) +#define HUF_CTABLE_WORKSPACE_SIZE (HUF_CTABLE_WORKSPACE_SIZE_U32 * sizeof(unsigned)) +size_t HUF_buildCTable_wksp (HUF_CElt* tree, const U32* count, U32 maxSymbolValue, U32 maxNbBits, void* workSpace, size_t wkspSize); + +/*! HUF_readStats() : + * Read compact Huffman tree, saved by HUF_writeCTable(). + * `huffWeight` is destination buffer. + * @return : size read from `src` , or an error Code . + * Note : Needed by HUF_readCTable() and HUF_readDTableXn() . */ +size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, + U32* rankStats, U32* nbSymbolsPtr, U32* tableLogPtr, + const void* src, size_t srcSize); + +/** HUF_readCTable() : + * Loading a CTable saved with HUF_writeCTable() */ +size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize); + + +/* + * HUF_decompress() does the following: + * 1. select the decompression algorithm (X2, X4) based on pre-computed heuristics + * 2. build Huffman table from save, using HUF_readDTableX?() + * 3. decode 1 or 4 segments in parallel using HUF_decompress?X?_usingDTable() + */ + +/** HUF_selectDecoder() : + * Tells which decoder is likely to decode faster, + * based on a set of pre-computed metrics. + * @return : 0==HUF_decompress4X2, 1==HUF_decompress4X4 . 
+ * Assumption : 0 < dstSize <= 128 KB */ +U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize); + +/** + * The minimum workspace size for the `workSpace` used in + * HUF_readDTableX2_wksp() and HUF_readDTableX4_wksp(). + * + * The space used depends on HUF_TABLELOG_MAX, ranging from ~1500 bytes when + * HUF_TABLE_LOG_MAX=12 to ~1850 bytes when HUF_TABLE_LOG_MAX=15. + * Buffer overflow errors may potentially occur if code modifications result in + * a required workspace size greater than that specified in the following + * macro. + */ +#define HUF_DECOMPRESS_WORKSPACE_SIZE (2 << 10) +#define HUF_DECOMPRESS_WORKSPACE_SIZE_U32 (HUF_DECOMPRESS_WORKSPACE_SIZE / sizeof(U32)) + +size_t HUF_readDTableX2 (HUF_DTable* DTable, const void* src, size_t srcSize); +size_t HUF_readDTableX2_wksp (HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize); +size_t HUF_readDTableX4 (HUF_DTable* DTable, const void* src, size_t srcSize); +size_t HUF_readDTableX4_wksp (HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize); + +size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); +size_t HUF_decompress4X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); +size_t HUF_decompress4X4_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); + + +/* ====================== */ +/* single stream variants */ +/* ====================== */ + +size_t HUF_compress1X (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog); +size_t HUF_compress1X_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize); /**< `workSpace` must be a table of at least HUF_WORKSPACE_SIZE_U32 unsigned */ +size_t HUF_compress1X_usingCTable(void* dst, size_t 
dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable); +/** HUF_compress1X_repeat() : + * Same as HUF_compress1X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none. + * If it uses hufTable it does not modify hufTable or repeat. + * If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used. + * If preferRepeat then the old table will always be used if valid. */ +size_t HUF_compress1X_repeat(void* dst, size_t dstSize, + const void* src, size_t srcSize, + unsigned maxSymbolValue, unsigned tableLog, + void* workSpace, size_t wkspSize, /**< `workSpace` must be aligned on 4-bytes boundaries, `wkspSize` must be >= HUF_WORKSPACE_SIZE */ + HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2); + +size_t HUF_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* single-symbol decoder */ +size_t HUF_decompress1X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* double-symbol decoder */ + +size_t HUF_decompress1X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); +size_t HUF_decompress1X_DCtx_wksp (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); +size_t HUF_decompress1X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< single-symbol decoder */ +size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< single-symbol decoder */ +size_t HUF_decompress1X4_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< double-symbols decoder */ +size_t HUF_decompress1X4_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< double-symbols decoder */ + +size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize, const void* 
cSrc, size_t cSrcSize, const HUF_DTable* DTable); /**< automatic selection of sing or double symbol decoder, based on DTable */ +size_t HUF_decompress1X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); +size_t HUF_decompress1X4_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); + +/* BMI2 variants. + * If the CPU has BMI2 support, pass bmi2=1, otherwise pass bmi2=0. + */ +size_t HUF_decompress1X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2); +size_t HUF_decompress1X2_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2); +size_t HUF_decompress4X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2); +size_t HUF_decompress4X_hufOnly_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2); + +#endif /* HUF_STATIC_LINKING_ONLY */ + +#if defined (__cplusplus) +} +#endif diff --git a/vendor/github.com/DataDog/zstd/huf_compress.c b/vendor/github.com/DataDog/zstd/huf_compress.c new file mode 100644 index 00000000000..83230b415f9 --- /dev/null +++ b/vendor/github.com/DataDog/zstd/huf_compress.c @@ -0,0 +1,788 @@ +/* ****************************************************************** + Huffman encoder, part of New Generation Entropy library + Copyright (C) 2013-2016, Yann Collet. + + BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + You can contact the author at : + - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy + - Public forum : https://groups.google.com/forum/#!forum/lz4c +****************************************************************** */ + +/* ************************************************************** +* Compiler specifics +****************************************************************/ +#ifdef _MSC_VER /* Visual Studio */ +# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ +#endif + + +/* ************************************************************** +* Includes +****************************************************************/ +#include /* memcpy, memset */ +#include /* printf (debug) */ +#include "bitstream.h" +#include "compiler.h" +#define FSE_STATIC_LINKING_ONLY /* FSE_optimalTableLog_internal */ +#include "fse.h" /* header compression */ +#define HUF_STATIC_LINKING_ONLY +#include "huf.h" +#include "error_private.h" + + 
+/* ************************************************************** +* Error Management +****************************************************************/ +#define HUF_isError ERR_isError +#define HUF_STATIC_ASSERT(c) { enum { HUF_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */ +#define CHECK_V_F(e, f) size_t const e = f; if (ERR_isError(e)) return e +#define CHECK_F(f) { CHECK_V_F(_var_err__, f); } + + +/* ************************************************************** +* Utils +****************************************************************/ +unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue) +{ + return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 1); +} + + +/* ******************************************************* +* HUF : Huffman block compression +*********************************************************/ +/* HUF_compressWeights() : + * Same as FSE_compress(), but dedicated to huff0's weights compression. + * The use case needs much less stack memory. + * Note : all elements within weightTable are supposed to be <= HUF_TABLELOG_MAX. 
+ */ +#define MAX_FSE_TABLELOG_FOR_HUFF_HEADER 6 +size_t HUF_compressWeights (void* dst, size_t dstSize, const void* weightTable, size_t wtSize) +{ + BYTE* const ostart = (BYTE*) dst; + BYTE* op = ostart; + BYTE* const oend = ostart + dstSize; + + U32 maxSymbolValue = HUF_TABLELOG_MAX; + U32 tableLog = MAX_FSE_TABLELOG_FOR_HUFF_HEADER; + + FSE_CTable CTable[FSE_CTABLE_SIZE_U32(MAX_FSE_TABLELOG_FOR_HUFF_HEADER, HUF_TABLELOG_MAX)]; + BYTE scratchBuffer[1< not compressible */ + } + + tableLog = FSE_optimalTableLog(tableLog, wtSize, maxSymbolValue); + CHECK_F( FSE_normalizeCount(norm, tableLog, count, wtSize, maxSymbolValue) ); + + /* Write table description header */ + { CHECK_V_F(hSize, FSE_writeNCount(op, oend-op, norm, maxSymbolValue, tableLog) ); + op += hSize; + } + + /* Compress */ + CHECK_F( FSE_buildCTable_wksp(CTable, norm, maxSymbolValue, tableLog, scratchBuffer, sizeof(scratchBuffer)) ); + { CHECK_V_F(cSize, FSE_compress_usingCTable(op, oend - op, weightTable, wtSize, CTable) ); + if (cSize == 0) return 0; /* not enough space for compressed data */ + op += cSize; + } + + return op-ostart; +} + + +struct HUF_CElt_s { + U16 val; + BYTE nbBits; +}; /* typedef'd to HUF_CElt within "huf.h" */ + +/*! HUF_writeCTable() : + `CTable` : Huffman tree to save, using huf representation. 
+ @return : size of saved CTable */ +size_t HUF_writeCTable (void* dst, size_t maxDstSize, + const HUF_CElt* CTable, U32 maxSymbolValue, U32 huffLog) +{ + BYTE bitsToWeight[HUF_TABLELOG_MAX + 1]; /* precomputed conversion table */ + BYTE huffWeight[HUF_SYMBOLVALUE_MAX]; + BYTE* op = (BYTE*)dst; + U32 n; + + /* check conditions */ + if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge); + + /* convert to weight */ + bitsToWeight[0] = 0; + for (n=1; n1) & (hSize < maxSymbolValue/2)) { /* FSE compressed */ + op[0] = (BYTE)hSize; + return hSize+1; + } } + + /* write raw values as 4-bits (max : 15) */ + if (maxSymbolValue > (256-128)) return ERROR(GENERIC); /* should not happen : likely means source cannot be compressed */ + if (((maxSymbolValue+1)/2) + 1 > maxDstSize) return ERROR(dstSize_tooSmall); /* not enough space within dst buffer */ + op[0] = (BYTE)(128 /*special case*/ + (maxSymbolValue-1)); + huffWeight[maxSymbolValue] = 0; /* to be sure it doesn't cause msan issue in final combination */ + for (n=0; n HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge); + if (nbSymbols > *maxSymbolValuePtr+1) return ERROR(maxSymbolValue_tooSmall); + + /* Prepare base value per rank */ + { U32 n, nextRankStart = 0; + for (n=1; n<=tableLog; n++) { + U32 current = nextRankStart; + nextRankStart += (rankVal[n] << (n-1)); + rankVal[n] = current; + } } + + /* fill nbBits */ + { U32 n; for (n=0; nn=tableLog+1 */ + U16 valPerRank[HUF_TABLELOG_MAX+2] = {0}; + { U32 n; for (n=0; n0; n--) { /* start at n=tablelog <-> w=1 */ + valPerRank[n] = min; /* get starting value within each rank */ + min += nbPerRank[n]; + min >>= 1; + } } + /* assign value within rank, symbol order */ + { U32 n; for (n=0; n maxNbBits */ + + /* there are several too large elements (at least >= 2) */ + { int totalCost = 0; + const U32 baseCost = 1 << (largestBits - maxNbBits); + U32 n = lastNonNull; + + while (huffNode[n].nbBits > maxNbBits) { + totalCost += baseCost - (1 << (largestBits - 
huffNode[n].nbBits)); + huffNode[n].nbBits = (BYTE)maxNbBits; + n --; + } /* n stops at huffNode[n].nbBits <= maxNbBits */ + while (huffNode[n].nbBits == maxNbBits) n--; /* n end at index of smallest symbol using < maxNbBits */ + + /* renorm totalCost */ + totalCost >>= (largestBits - maxNbBits); /* note : totalCost is necessarily a multiple of baseCost */ + + /* repay normalized cost */ + { U32 const noSymbol = 0xF0F0F0F0; + U32 rankLast[HUF_TABLELOG_MAX+2]; + int pos; + + /* Get pos of last (smallest) symbol per rank */ + memset(rankLast, 0xF0, sizeof(rankLast)); + { U32 currentNbBits = maxNbBits; + for (pos=n ; pos >= 0; pos--) { + if (huffNode[pos].nbBits >= currentNbBits) continue; + currentNbBits = huffNode[pos].nbBits; /* < maxNbBits */ + rankLast[maxNbBits-currentNbBits] = pos; + } } + + while (totalCost > 0) { + U32 nBitsToDecrease = BIT_highbit32(totalCost) + 1; + for ( ; nBitsToDecrease > 1; nBitsToDecrease--) { + U32 highPos = rankLast[nBitsToDecrease]; + U32 lowPos = rankLast[nBitsToDecrease-1]; + if (highPos == noSymbol) continue; + if (lowPos == noSymbol) break; + { U32 const highTotal = huffNode[highPos].count; + U32 const lowTotal = 2 * huffNode[lowPos].count; + if (highTotal <= lowTotal) break; + } } + /* only triggered when no more rank 1 symbol left => find closest one (note : there is necessarily at least one !) 
*/ + /* HUF_MAX_TABLELOG test just to please gcc 5+; but it should not be necessary */ + while ((nBitsToDecrease<=HUF_TABLELOG_MAX) && (rankLast[nBitsToDecrease] == noSymbol)) + nBitsToDecrease ++; + totalCost -= 1 << (nBitsToDecrease-1); + if (rankLast[nBitsToDecrease-1] == noSymbol) + rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease]; /* this rank is no longer empty */ + huffNode[rankLast[nBitsToDecrease]].nbBits ++; + if (rankLast[nBitsToDecrease] == 0) /* special case, reached largest symbol */ + rankLast[nBitsToDecrease] = noSymbol; + else { + rankLast[nBitsToDecrease]--; + if (huffNode[rankLast[nBitsToDecrease]].nbBits != maxNbBits-nBitsToDecrease) + rankLast[nBitsToDecrease] = noSymbol; /* this rank is now empty */ + } } /* while (totalCost > 0) */ + + while (totalCost < 0) { /* Sometimes, cost correction overshoot */ + if (rankLast[1] == noSymbol) { /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0 (using maxNbBits) */ + while (huffNode[n].nbBits == maxNbBits) n--; + huffNode[n+1].nbBits--; + rankLast[1] = n+1; + totalCost++; + continue; + } + huffNode[ rankLast[1] + 1 ].nbBits--; + rankLast[1]++; + totalCost ++; + } } } /* there are several too large elements (at least >= 2) */ + + return maxNbBits; +} + + +typedef struct { + U32 base; + U32 current; +} rankPos; + +static void HUF_sort(nodeElt* huffNode, const U32* count, U32 maxSymbolValue) +{ + rankPos rank[32]; + U32 n; + + memset(rank, 0, sizeof(rank)); + for (n=0; n<=maxSymbolValue; n++) { + U32 r = BIT_highbit32(count[n] + 1); + rank[r].base ++; + } + for (n=30; n>0; n--) rank[n-1].base += rank[n].base; + for (n=0; n<32; n++) rank[n].current = rank[n].base; + for (n=0; n<=maxSymbolValue; n++) { + U32 const c = count[n]; + U32 const r = BIT_highbit32(c+1) + 1; + U32 pos = rank[r].current++; + while ((pos > rank[r].base) && (c > huffNode[pos-1].count)) { + huffNode[pos] = huffNode[pos-1]; + pos--; + } + huffNode[pos].count = c; + huffNode[pos].byte = 
(BYTE)n; + } +} + + +/** HUF_buildCTable_wksp() : + * Same as HUF_buildCTable(), but using externally allocated scratch buffer. + * `workSpace` must be aligned on 4-bytes boundaries, and be at least as large as a table of HUF_CTABLE_WORKSPACE_SIZE_U32 unsigned. + */ +#define STARTNODE (HUF_SYMBOLVALUE_MAX+1) +typedef nodeElt huffNodeTable[HUF_CTABLE_WORKSPACE_SIZE_U32]; +size_t HUF_buildCTable_wksp (HUF_CElt* tree, const U32* count, U32 maxSymbolValue, U32 maxNbBits, void* workSpace, size_t wkspSize) +{ + nodeElt* const huffNode0 = (nodeElt*)workSpace; + nodeElt* const huffNode = huffNode0+1; + U32 n, nonNullRank; + int lowS, lowN; + U16 nodeNb = STARTNODE; + U32 nodeRoot; + + /* safety checks */ + if (((size_t)workSpace & 3) != 0) return ERROR(GENERIC); /* must be aligned on 4-bytes boundaries */ + if (wkspSize < sizeof(huffNodeTable)) return ERROR(workSpace_tooSmall); + if (maxNbBits == 0) maxNbBits = HUF_TABLELOG_DEFAULT; + if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge); + memset(huffNode0, 0, sizeof(huffNodeTable)); + + /* sort, decreasing order */ + HUF_sort(huffNode, count, maxSymbolValue); + + /* init for parents */ + nonNullRank = maxSymbolValue; + while(huffNode[nonNullRank].count == 0) nonNullRank--; + lowS = nonNullRank; nodeRoot = nodeNb + lowS - 1; lowN = nodeNb; + huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS-1].count; + huffNode[lowS].parent = huffNode[lowS-1].parent = nodeNb; + nodeNb++; lowS-=2; + for (n=nodeNb; n<=nodeRoot; n++) huffNode[n].count = (U32)(1U<<30); + huffNode0[0].count = (U32)(1U<<31); /* fake entry, strong barrier */ + + /* create parents */ + while (nodeNb <= nodeRoot) { + U32 n1 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++; + U32 n2 = (huffNode[lowS].count < huffNode[lowN].count) ? 
lowS-- : lowN++; + huffNode[nodeNb].count = huffNode[n1].count + huffNode[n2].count; + huffNode[n1].parent = huffNode[n2].parent = nodeNb; + nodeNb++; + } + + /* distribute weights (unlimited tree height) */ + huffNode[nodeRoot].nbBits = 0; + for (n=nodeRoot-1; n>=STARTNODE; n--) + huffNode[n].nbBits = huffNode[ huffNode[n].parent ].nbBits + 1; + for (n=0; n<=nonNullRank; n++) + huffNode[n].nbBits = huffNode[ huffNode[n].parent ].nbBits + 1; + + /* enforce maxTableLog */ + maxNbBits = HUF_setMaxHeight(huffNode, nonNullRank, maxNbBits); + + /* fill result into tree (val, nbBits) */ + { U16 nbPerRank[HUF_TABLELOG_MAX+1] = {0}; + U16 valPerRank[HUF_TABLELOG_MAX+1] = {0}; + if (maxNbBits > HUF_TABLELOG_MAX) return ERROR(GENERIC); /* check fit into table */ + for (n=0; n<=nonNullRank; n++) + nbPerRank[huffNode[n].nbBits]++; + /* determine stating value per rank */ + { U16 min = 0; + for (n=maxNbBits; n>0; n--) { + valPerRank[n] = min; /* get starting value within each rank */ + min += nbPerRank[n]; + min >>= 1; + } } + for (n=0; n<=maxSymbolValue; n++) + tree[huffNode[n].byte].nbBits = huffNode[n].nbBits; /* push nbBits per symbol, symbol order */ + for (n=0; n<=maxSymbolValue; n++) + tree[n].val = valPerRank[tree[n].nbBits]++; /* assign value within rank, symbol order */ + } + + return maxNbBits; +} + +/** HUF_buildCTable() : + * @return : maxNbBits + * Note : count is used before tree is written, so they can safely overlap + */ +size_t HUF_buildCTable (HUF_CElt* tree, const U32* count, U32 maxSymbolValue, U32 maxNbBits) +{ + huffNodeTable nodeTable; + return HUF_buildCTable_wksp(tree, count, maxSymbolValue, maxNbBits, nodeTable, sizeof(nodeTable)); +} + +static size_t HUF_estimateCompressedSize(HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue) +{ + size_t nbBits = 0; + int s; + for (s = 0; s <= (int)maxSymbolValue; ++s) { + nbBits += CTable[s].nbBits * count[s]; + } + return nbBits >> 3; +} + +static int HUF_validateCTable(const HUF_CElt* CTable, const 
unsigned* count, unsigned maxSymbolValue) { + int bad = 0; + int s; + for (s = 0; s <= (int)maxSymbolValue; ++s) { + bad |= (count[s] != 0) & (CTable[s].nbBits == 0); + } + return !bad; +} + +size_t HUF_compressBound(size_t size) { return HUF_COMPRESSBOUND(size); } + +FORCE_INLINE_TEMPLATE void +HUF_encodeSymbol(BIT_CStream_t* bitCPtr, U32 symbol, const HUF_CElt* CTable) +{ + BIT_addBitsFast(bitCPtr, CTable[symbol].val, CTable[symbol].nbBits); +} + +#define HUF_FLUSHBITS(s) BIT_flushBits(s) + +#define HUF_FLUSHBITS_1(stream) \ + if (sizeof((stream)->bitContainer)*8 < HUF_TABLELOG_MAX*2+7) HUF_FLUSHBITS(stream) + +#define HUF_FLUSHBITS_2(stream) \ + if (sizeof((stream)->bitContainer)*8 < HUF_TABLELOG_MAX*4+7) HUF_FLUSHBITS(stream) + +FORCE_INLINE_TEMPLATE size_t +HUF_compress1X_usingCTable_internal_body(void* dst, size_t dstSize, + const void* src, size_t srcSize, + const HUF_CElt* CTable) +{ + const BYTE* ip = (const BYTE*) src; + BYTE* const ostart = (BYTE*)dst; + BYTE* const oend = ostart + dstSize; + BYTE* op = ostart; + size_t n; + BIT_CStream_t bitC; + + /* init */ + if (dstSize < 8) return 0; /* not enough space to compress */ + { size_t const initErr = BIT_initCStream(&bitC, op, oend-op); + if (HUF_isError(initErr)) return 0; } + + n = srcSize & ~3; /* join to mod 4 */ + switch (srcSize & 3) + { + case 3 : HUF_encodeSymbol(&bitC, ip[n+ 2], CTable); + HUF_FLUSHBITS_2(&bitC); + /* fall-through */ + case 2 : HUF_encodeSymbol(&bitC, ip[n+ 1], CTable); + HUF_FLUSHBITS_1(&bitC); + /* fall-through */ + case 1 : HUF_encodeSymbol(&bitC, ip[n+ 0], CTable); + HUF_FLUSHBITS(&bitC); + /* fall-through */ + case 0 : /* fall-through */ + default: break; + } + + for (; n>0; n-=4) { /* note : n&3==0 at this stage */ + HUF_encodeSymbol(&bitC, ip[n- 1], CTable); + HUF_FLUSHBITS_1(&bitC); + HUF_encodeSymbol(&bitC, ip[n- 2], CTable); + HUF_FLUSHBITS_2(&bitC); + HUF_encodeSymbol(&bitC, ip[n- 3], CTable); + HUF_FLUSHBITS_1(&bitC); + HUF_encodeSymbol(&bitC, ip[n- 4], CTable); + 
HUF_FLUSHBITS(&bitC); + } + + return BIT_closeCStream(&bitC); +} + +#if DYNAMIC_BMI2 + +static TARGET_ATTRIBUTE("bmi2") size_t +HUF_compress1X_usingCTable_internal_bmi2(void* dst, size_t dstSize, + const void* src, size_t srcSize, + const HUF_CElt* CTable) +{ + return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable); +} + +static size_t +HUF_compress1X_usingCTable_internal_default(void* dst, size_t dstSize, + const void* src, size_t srcSize, + const HUF_CElt* CTable) +{ + return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable); +} + +static size_t +HUF_compress1X_usingCTable_internal(void* dst, size_t dstSize, + const void* src, size_t srcSize, + const HUF_CElt* CTable, const int bmi2) +{ + if (bmi2) { + return HUF_compress1X_usingCTable_internal_bmi2(dst, dstSize, src, srcSize, CTable); + } + return HUF_compress1X_usingCTable_internal_default(dst, dstSize, src, srcSize, CTable); +} + +#else + +static size_t +HUF_compress1X_usingCTable_internal(void* dst, size_t dstSize, + const void* src, size_t srcSize, + const HUF_CElt* CTable, const int bmi2) +{ + (void)bmi2; + return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable); +} + +#endif + +size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable) +{ + return HUF_compress1X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, /* bmi2 */ 0); +} + + +static size_t +HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize, + const void* src, size_t srcSize, + const HUF_CElt* CTable, int bmi2) +{ + size_t const segmentSize = (srcSize+3)/4; /* first 3 segments */ + const BYTE* ip = (const BYTE*) src; + const BYTE* const iend = ip + srcSize; + BYTE* const ostart = (BYTE*) dst; + BYTE* const oend = ostart + dstSize; + BYTE* op = ostart; + + if (dstSize < 6 + 1 + 1 + 1 + 8) return 0; /* minimum space to compress successfully */ + if (srcSize < 12) return 0; /* no saving 
possible : too small input */ + op += 6; /* jumpTable */ + + { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, oend-op, ip, segmentSize, CTable, bmi2) ); + if (cSize==0) return 0; + assert(cSize <= 65535); + MEM_writeLE16(ostart, (U16)cSize); + op += cSize; + } + + ip += segmentSize; + { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, oend-op, ip, segmentSize, CTable, bmi2) ); + if (cSize==0) return 0; + assert(cSize <= 65535); + MEM_writeLE16(ostart+2, (U16)cSize); + op += cSize; + } + + ip += segmentSize; + { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, oend-op, ip, segmentSize, CTable, bmi2) ); + if (cSize==0) return 0; + assert(cSize <= 65535); + MEM_writeLE16(ostart+4, (U16)cSize); + op += cSize; + } + + ip += segmentSize; + { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, oend-op, ip, iend-ip, CTable, bmi2) ); + if (cSize==0) return 0; + op += cSize; + } + + return op-ostart; +} + +size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable) +{ + return HUF_compress4X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, /* bmi2 */ 0); +} + + +static size_t HUF_compressCTable_internal( + BYTE* const ostart, BYTE* op, BYTE* const oend, + const void* src, size_t srcSize, + unsigned singleStream, const HUF_CElt* CTable, const int bmi2) +{ + size_t const cSize = singleStream ? 
+ HUF_compress1X_usingCTable_internal(op, oend - op, src, srcSize, CTable, bmi2) : + HUF_compress4X_usingCTable_internal(op, oend - op, src, srcSize, CTable, bmi2); + if (HUF_isError(cSize)) { return cSize; } + if (cSize==0) { return 0; } /* uncompressible */ + op += cSize; + /* check compressibility */ + if ((size_t)(op-ostart) >= srcSize-1) { return 0; } + return op-ostart; +} + +typedef struct { + U32 count[HUF_SYMBOLVALUE_MAX + 1]; + HUF_CElt CTable[HUF_SYMBOLVALUE_MAX + 1]; + huffNodeTable nodeTable; +} HUF_compress_tables_t; + +/* HUF_compress_internal() : + * `workSpace` must a table of at least HUF_WORKSPACE_SIZE_U32 unsigned */ +static size_t HUF_compress_internal ( + void* dst, size_t dstSize, + const void* src, size_t srcSize, + unsigned maxSymbolValue, unsigned huffLog, + unsigned singleStream, + void* workSpace, size_t wkspSize, + HUF_CElt* oldHufTable, HUF_repeat* repeat, int preferRepeat, + const int bmi2) +{ + HUF_compress_tables_t* const table = (HUF_compress_tables_t*)workSpace; + BYTE* const ostart = (BYTE*)dst; + BYTE* const oend = ostart + dstSize; + BYTE* op = ostart; + + /* checks & inits */ + if (((size_t)workSpace & 3) != 0) return ERROR(GENERIC); /* must be aligned on 4-bytes boundaries */ + if (wkspSize < sizeof(*table)) return ERROR(workSpace_tooSmall); + if (!srcSize) return 0; /* Uncompressed */ + if (!dstSize) return 0; /* cannot fit anything within dst budget */ + if (srcSize > HUF_BLOCKSIZE_MAX) return ERROR(srcSize_wrong); /* current block size limit */ + if (huffLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge); + if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge); + if (!maxSymbolValue) maxSymbolValue = HUF_SYMBOLVALUE_MAX; + if (!huffLog) huffLog = HUF_TABLELOG_DEFAULT; + + /* Heuristic : If old table is valid, use it for small inputs */ + if (preferRepeat && repeat && *repeat == HUF_repeat_valid) { + return HUF_compressCTable_internal(ostart, op, oend, + src, srcSize, + singleStream, 
oldHufTable, bmi2); + } + + /* Scan input and build symbol stats */ + { CHECK_V_F(largest, FSE_count_wksp (table->count, &maxSymbolValue, (const BYTE*)src, srcSize, table->count) ); + if (largest == srcSize) { *ostart = ((const BYTE*)src)[0]; return 1; } /* single symbol, rle */ + if (largest <= (srcSize >> 7)+1) return 0; /* heuristic : probably not compressible enough */ + } + + /* Check validity of previous table */ + if ( repeat + && *repeat == HUF_repeat_check + && !HUF_validateCTable(oldHufTable, table->count, maxSymbolValue)) { + *repeat = HUF_repeat_none; + } + /* Heuristic : use existing table for small inputs */ + if (preferRepeat && repeat && *repeat != HUF_repeat_none) { + return HUF_compressCTable_internal(ostart, op, oend, + src, srcSize, + singleStream, oldHufTable, bmi2); + } + + /* Build Huffman Tree */ + huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue); + { CHECK_V_F(maxBits, HUF_buildCTable_wksp(table->CTable, table->count, + maxSymbolValue, huffLog, + table->nodeTable, sizeof(table->nodeTable)) ); + huffLog = (U32)maxBits; + /* Zero unused symbols in CTable, so we can check it for validity */ + memset(table->CTable + (maxSymbolValue + 1), 0, + sizeof(table->CTable) - ((maxSymbolValue + 1) * sizeof(HUF_CElt))); + } + + /* Write table description header */ + { CHECK_V_F(hSize, HUF_writeCTable (op, dstSize, table->CTable, maxSymbolValue, huffLog) ); + /* Check if using previous huffman table is beneficial */ + if (repeat && *repeat != HUF_repeat_none) { + size_t const oldSize = HUF_estimateCompressedSize(oldHufTable, table->count, maxSymbolValue); + size_t const newSize = HUF_estimateCompressedSize(table->CTable, table->count, maxSymbolValue); + if (oldSize <= hSize + newSize || hSize + 12 >= srcSize) { + return HUF_compressCTable_internal(ostart, op, oend, + src, srcSize, + singleStream, oldHufTable, bmi2); + } } + + /* Use the new huffman table */ + if (hSize + 12ul >= srcSize) { return 0; } + op += hSize; + if (repeat) { *repeat = 
HUF_repeat_none; } + if (oldHufTable) + memcpy(oldHufTable, table->CTable, sizeof(table->CTable)); /* Save new table */ + } + return HUF_compressCTable_internal(ostart, op, oend, + src, srcSize, + singleStream, table->CTable, bmi2); +} + + +size_t HUF_compress1X_wksp (void* dst, size_t dstSize, + const void* src, size_t srcSize, + unsigned maxSymbolValue, unsigned huffLog, + void* workSpace, size_t wkspSize) +{ + return HUF_compress_internal(dst, dstSize, src, srcSize, + maxSymbolValue, huffLog, 1 /*single stream*/, + workSpace, wkspSize, + NULL, NULL, 0, 0 /*bmi2*/); +} + +size_t HUF_compress1X_repeat (void* dst, size_t dstSize, + const void* src, size_t srcSize, + unsigned maxSymbolValue, unsigned huffLog, + void* workSpace, size_t wkspSize, + HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2) +{ + return HUF_compress_internal(dst, dstSize, src, srcSize, + maxSymbolValue, huffLog, 1 /*single stream*/, + workSpace, wkspSize, hufTable, + repeat, preferRepeat, bmi2); +} + +size_t HUF_compress1X (void* dst, size_t dstSize, + const void* src, size_t srcSize, + unsigned maxSymbolValue, unsigned huffLog) +{ + unsigned workSpace[HUF_WORKSPACE_SIZE_U32]; + return HUF_compress1X_wksp(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, workSpace, sizeof(workSpace)); +} + +/* HUF_compress4X_repeat(): + * compress input using 4 streams. + * provide workspace to generate compression tables */ +size_t HUF_compress4X_wksp (void* dst, size_t dstSize, + const void* src, size_t srcSize, + unsigned maxSymbolValue, unsigned huffLog, + void* workSpace, size_t wkspSize) +{ + return HUF_compress_internal(dst, dstSize, src, srcSize, + maxSymbolValue, huffLog, 0 /*4 streams*/, + workSpace, wkspSize, + NULL, NULL, 0, 0 /*bmi2*/); +} + +/* HUF_compress4X_repeat(): + * compress input using 4 streams. 
+ * re-use an existing huffman compression table */ +size_t HUF_compress4X_repeat (void* dst, size_t dstSize, + const void* src, size_t srcSize, + unsigned maxSymbolValue, unsigned huffLog, + void* workSpace, size_t wkspSize, + HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2) +{ + return HUF_compress_internal(dst, dstSize, src, srcSize, + maxSymbolValue, huffLog, 0 /* 4 streams */, + workSpace, wkspSize, + hufTable, repeat, preferRepeat, bmi2); +} + +size_t HUF_compress2 (void* dst, size_t dstSize, + const void* src, size_t srcSize, + unsigned maxSymbolValue, unsigned huffLog) +{ + unsigned workSpace[HUF_WORKSPACE_SIZE_U32]; + return HUF_compress4X_wksp(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, workSpace, sizeof(workSpace)); +} + +size_t HUF_compress (void* dst, size_t maxDstSize, const void* src, size_t srcSize) +{ + return HUF_compress2(dst, maxDstSize, src, srcSize, 255, HUF_TABLELOG_DEFAULT); +} diff --git a/vendor/github.com/DataDog/zstd/huf_decompress.c b/vendor/github.com/DataDog/zstd/huf_decompress.c new file mode 100644 index 00000000000..73f5c46c061 --- /dev/null +++ b/vendor/github.com/DataDog/zstd/huf_decompress.c @@ -0,0 +1,1096 @@ +/* ****************************************************************** + Huffman decoder, part of New Generation Entropy library + Copyright (C) 2013-2016, Yann Collet. + + BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + You can contact the author at : + - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy + - Public forum : https://groups.google.com/forum/#!forum/lz4c +****************************************************************** */ + +/* ************************************************************** +* Dependencies +****************************************************************/ +#include /* memcpy, memset */ +#include "bitstream.h" /* BIT_* */ +#include "compiler.h" +#include "fse.h" /* header compression */ +#define HUF_STATIC_LINKING_ONLY +#include "huf.h" +#include "error_private.h" + + +/* ************************************************************** +* Error Management +****************************************************************/ +#define HUF_isError ERR_isError +#define HUF_STATIC_ASSERT(c) { enum { HUF_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */ +#define CHECK_F(f) { size_t const err_ = (f); if (HUF_isError(err_)) return err_; } + + +/* ************************************************************** +* Byte alignment for workSpace management +****************************************************************/ +#define 
HUF_ALIGN(x, a) HUF_ALIGN_MASK((x), (a) - 1) +#define HUF_ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask)) + + +/*-***************************/ +/* generic DTableDesc */ +/*-***************************/ +typedef struct { BYTE maxTableLog; BYTE tableType; BYTE tableLog; BYTE reserved; } DTableDesc; + +static DTableDesc HUF_getDTableDesc(const HUF_DTable* table) +{ + DTableDesc dtd; + memcpy(&dtd, table, sizeof(dtd)); + return dtd; +} + + +/*-***************************/ +/* single-symbol decoding */ +/*-***************************/ +typedef struct { BYTE byte; BYTE nbBits; } HUF_DEltX2; /* single-symbol decoding */ + +size_t HUF_readDTableX2_wksp(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize) +{ + U32 tableLog = 0; + U32 nbSymbols = 0; + size_t iSize; + void* const dtPtr = DTable + 1; + HUF_DEltX2* const dt = (HUF_DEltX2*)dtPtr; + + U32* rankVal; + BYTE* huffWeight; + size_t spaceUsed32 = 0; + + rankVal = (U32 *)workSpace + spaceUsed32; + spaceUsed32 += HUF_TABLELOG_ABSOLUTEMAX + 1; + huffWeight = (BYTE *)((U32 *)workSpace + spaceUsed32); + spaceUsed32 += HUF_ALIGN(HUF_SYMBOLVALUE_MAX + 1, sizeof(U32)) >> 2; + + if ((spaceUsed32 << 2) > wkspSize) return ERROR(tableLog_tooLarge); + + HUF_STATIC_ASSERT(sizeof(DTableDesc) == sizeof(HUF_DTable)); + /* memset(huffWeight, 0, sizeof(huffWeight)); */ /* is not necessary, even though some analyzer complain ... 
*/ + + iSize = HUF_readStats(huffWeight, HUF_SYMBOLVALUE_MAX + 1, rankVal, &nbSymbols, &tableLog, src, srcSize); + if (HUF_isError(iSize)) return iSize; + + /* Table header */ + { DTableDesc dtd = HUF_getDTableDesc(DTable); + if (tableLog > (U32)(dtd.maxTableLog+1)) return ERROR(tableLog_tooLarge); /* DTable too small, Huffman tree cannot fit in */ + dtd.tableType = 0; + dtd.tableLog = (BYTE)tableLog; + memcpy(DTable, &dtd, sizeof(dtd)); + } + + /* Calculate starting value for each rank */ + { U32 n, nextRankStart = 0; + for (n=1; n> 1; + U32 u; + HUF_DEltX2 D; + D.byte = (BYTE)n; D.nbBits = (BYTE)(tableLog + 1 - w); + for (u = rankVal[w]; u < rankVal[w] + length; u++) + dt[u] = D; + rankVal[w] += length; + } } + + return iSize; +} + +size_t HUF_readDTableX2(HUF_DTable* DTable, const void* src, size_t srcSize) +{ + U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32]; + return HUF_readDTableX2_wksp(DTable, src, srcSize, + workSpace, sizeof(workSpace)); +} + +typedef struct { U16 sequence; BYTE nbBits; BYTE length; } HUF_DEltX4; /* double-symbols decoding */ + +FORCE_INLINE_TEMPLATE BYTE +HUF_decodeSymbolX2(BIT_DStream_t* Dstream, const HUF_DEltX2* dt, const U32 dtLog) +{ + size_t const val = BIT_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */ + BYTE const c = dt[val].byte; + BIT_skipBits(Dstream, dt[val].nbBits); + return c; +} + +#define HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) \ + *ptr++ = HUF_decodeSymbolX2(DStreamPtr, dt, dtLog) + +#define HUF_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \ + if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \ + HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) + +#define HUF_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \ + if (MEM_64bits()) \ + HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) + +HINT_INLINE size_t +HUF_decodeStreamX2(BYTE* p, BIT_DStream_t* const bitDPtr, BYTE* const pEnd, const HUF_DEltX2* const dt, const U32 dtLog) +{ + BYTE* const pStart = p; + + /* up to 4 symbols at a time */ + while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < 
pEnd-3)) { + HUF_DECODE_SYMBOLX2_2(p, bitDPtr); + HUF_DECODE_SYMBOLX2_1(p, bitDPtr); + HUF_DECODE_SYMBOLX2_2(p, bitDPtr); + HUF_DECODE_SYMBOLX2_0(p, bitDPtr); + } + + /* [0-3] symbols remaining */ + if (MEM_32bits()) + while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd)) + HUF_DECODE_SYMBOLX2_0(p, bitDPtr); + + /* no more data to retrieve from bitstream, no need to reload */ + while (p < pEnd) + HUF_DECODE_SYMBOLX2_0(p, bitDPtr); + + return pEnd-pStart; +} + +FORCE_INLINE_TEMPLATE size_t +HUF_decompress1X2_usingDTable_internal_body( + void* dst, size_t dstSize, + const void* cSrc, size_t cSrcSize, + const HUF_DTable* DTable) +{ + BYTE* op = (BYTE*)dst; + BYTE* const oend = op + dstSize; + const void* dtPtr = DTable + 1; + const HUF_DEltX2* const dt = (const HUF_DEltX2*)dtPtr; + BIT_DStream_t bitD; + DTableDesc const dtd = HUF_getDTableDesc(DTable); + U32 const dtLog = dtd.tableLog; + + CHECK_F( BIT_initDStream(&bitD, cSrc, cSrcSize) ); + + HUF_decodeStreamX2(op, &bitD, oend, dt, dtLog); + + if (!BIT_endOfDStream(&bitD)) return ERROR(corruption_detected); + + return dstSize; +} + +FORCE_INLINE_TEMPLATE size_t +HUF_decompress4X2_usingDTable_internal_body( + void* dst, size_t dstSize, + const void* cSrc, size_t cSrcSize, + const HUF_DTable* DTable) +{ + /* Check */ + if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */ + + { const BYTE* const istart = (const BYTE*) cSrc; + BYTE* const ostart = (BYTE*) dst; + BYTE* const oend = ostart + dstSize; + const void* const dtPtr = DTable + 1; + const HUF_DEltX2* const dt = (const HUF_DEltX2*)dtPtr; + + /* Init */ + BIT_DStream_t bitD1; + BIT_DStream_t bitD2; + BIT_DStream_t bitD3; + BIT_DStream_t bitD4; + size_t const length1 = MEM_readLE16(istart); + size_t const length2 = MEM_readLE16(istart+2); + size_t const length3 = MEM_readLE16(istart+4); + size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6); + const BYTE* const istart1 = 
istart + 6; /* jumpTable */ + const BYTE* const istart2 = istart1 + length1; + const BYTE* const istart3 = istart2 + length2; + const BYTE* const istart4 = istart3 + length3; + const size_t segmentSize = (dstSize+3) / 4; + BYTE* const opStart2 = ostart + segmentSize; + BYTE* const opStart3 = opStart2 + segmentSize; + BYTE* const opStart4 = opStart3 + segmentSize; + BYTE* op1 = ostart; + BYTE* op2 = opStart2; + BYTE* op3 = opStart3; + BYTE* op4 = opStart4; + U32 endSignal = BIT_DStream_unfinished; + DTableDesc const dtd = HUF_getDTableDesc(DTable); + U32 const dtLog = dtd.tableLog; + + if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */ + CHECK_F( BIT_initDStream(&bitD1, istart1, length1) ); + CHECK_F( BIT_initDStream(&bitD2, istart2, length2) ); + CHECK_F( BIT_initDStream(&bitD3, istart3, length3) ); + CHECK_F( BIT_initDStream(&bitD4, istart4, length4) ); + + /* up to 16 symbols per loop (4 symbols per stream) in 64-bit mode */ + endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4); + while ( (endSignal==BIT_DStream_unfinished) && (op4<(oend-3)) ) { + HUF_DECODE_SYMBOLX2_2(op1, &bitD1); + HUF_DECODE_SYMBOLX2_2(op2, &bitD2); + HUF_DECODE_SYMBOLX2_2(op3, &bitD3); + HUF_DECODE_SYMBOLX2_2(op4, &bitD4); + HUF_DECODE_SYMBOLX2_1(op1, &bitD1); + HUF_DECODE_SYMBOLX2_1(op2, &bitD2); + HUF_DECODE_SYMBOLX2_1(op3, &bitD3); + HUF_DECODE_SYMBOLX2_1(op4, &bitD4); + HUF_DECODE_SYMBOLX2_2(op1, &bitD1); + HUF_DECODE_SYMBOLX2_2(op2, &bitD2); + HUF_DECODE_SYMBOLX2_2(op3, &bitD3); + HUF_DECODE_SYMBOLX2_2(op4, &bitD4); + HUF_DECODE_SYMBOLX2_0(op1, &bitD1); + HUF_DECODE_SYMBOLX2_0(op2, &bitD2); + HUF_DECODE_SYMBOLX2_0(op3, &bitD3); + HUF_DECODE_SYMBOLX2_0(op4, &bitD4); + BIT_reloadDStream(&bitD1); + BIT_reloadDStream(&bitD2); + BIT_reloadDStream(&bitD3); + BIT_reloadDStream(&bitD4); + } + + /* check corruption */ + /* note : should not be necessary : op# advance in lock step, and we control op4. 
+ * but curiously, binary generated by gcc 7.2 & 7.3 with -mbmi2 runs faster when >=1 test is present */ + if (op1 > opStart2) return ERROR(corruption_detected); + if (op2 > opStart3) return ERROR(corruption_detected); + if (op3 > opStart4) return ERROR(corruption_detected); + /* note : op4 supposed already verified within main loop */ + + /* finish bitStreams one by one */ + HUF_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog); + HUF_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog); + HUF_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog); + HUF_decodeStreamX2(op4, &bitD4, oend, dt, dtLog); + + /* check */ + { U32 const endCheck = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4); + if (!endCheck) return ERROR(corruption_detected); } + + /* decoded size */ + return dstSize; + } +} + + +FORCE_INLINE_TEMPLATE U32 +HUF_decodeSymbolX4(void* op, BIT_DStream_t* DStream, const HUF_DEltX4* dt, const U32 dtLog) +{ + size_t const val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */ + memcpy(op, dt+val, 2); + BIT_skipBits(DStream, dt[val].nbBits); + return dt[val].length; +} + +FORCE_INLINE_TEMPLATE U32 +HUF_decodeLastSymbolX4(void* op, BIT_DStream_t* DStream, const HUF_DEltX4* dt, const U32 dtLog) +{ + size_t const val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */ + memcpy(op, dt+val, 1); + if (dt[val].length==1) BIT_skipBits(DStream, dt[val].nbBits); + else { + if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8)) { + BIT_skipBits(DStream, dt[val].nbBits); + if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8)) + /* ugly hack; works only because it's the last symbol. 
Note : can't easily extract nbBits from just this symbol */ + DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8); + } } + return 1; +} + +#define HUF_DECODE_SYMBOLX4_0(ptr, DStreamPtr) \ + ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog) + +#define HUF_DECODE_SYMBOLX4_1(ptr, DStreamPtr) \ + if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \ + ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog) + +#define HUF_DECODE_SYMBOLX4_2(ptr, DStreamPtr) \ + if (MEM_64bits()) \ + ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog) + +HINT_INLINE size_t +HUF_decodeStreamX4(BYTE* p, BIT_DStream_t* bitDPtr, BYTE* const pEnd, + const HUF_DEltX4* const dt, const U32 dtLog) +{ + BYTE* const pStart = p; + + /* up to 8 symbols at a time */ + while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd-(sizeof(bitDPtr->bitContainer)-1))) { + HUF_DECODE_SYMBOLX4_2(p, bitDPtr); + HUF_DECODE_SYMBOLX4_1(p, bitDPtr); + HUF_DECODE_SYMBOLX4_2(p, bitDPtr); + HUF_DECODE_SYMBOLX4_0(p, bitDPtr); + } + + /* closer to end : up to 2 symbols at a time */ + while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p <= pEnd-2)) + HUF_DECODE_SYMBOLX4_0(p, bitDPtr); + + while (p <= pEnd-2) + HUF_DECODE_SYMBOLX4_0(p, bitDPtr); /* no need to reload : reached the end of DStream */ + + if (p < pEnd) + p += HUF_decodeLastSymbolX4(p, bitDPtr, dt, dtLog); + + return p-pStart; +} + +FORCE_INLINE_TEMPLATE size_t +HUF_decompress1X4_usingDTable_internal_body( + void* dst, size_t dstSize, + const void* cSrc, size_t cSrcSize, + const HUF_DTable* DTable) +{ + BIT_DStream_t bitD; + + /* Init */ + CHECK_F( BIT_initDStream(&bitD, cSrc, cSrcSize) ); + + /* decode */ + { BYTE* const ostart = (BYTE*) dst; + BYTE* const oend = ostart + dstSize; + const void* const dtPtr = DTable+1; /* force compiler to not use strict-aliasing */ + const HUF_DEltX4* const dt = (const HUF_DEltX4*)dtPtr; + DTableDesc const dtd = HUF_getDTableDesc(DTable); + HUF_decodeStreamX4(ostart, &bitD, oend, dt, 
dtd.tableLog); + } + + /* check */ + if (!BIT_endOfDStream(&bitD)) return ERROR(corruption_detected); + + /* decoded size */ + return dstSize; +} + + +FORCE_INLINE_TEMPLATE size_t +HUF_decompress4X4_usingDTable_internal_body( + void* dst, size_t dstSize, + const void* cSrc, size_t cSrcSize, + const HUF_DTable* DTable) +{ + if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */ + + { const BYTE* const istart = (const BYTE*) cSrc; + BYTE* const ostart = (BYTE*) dst; + BYTE* const oend = ostart + dstSize; + const void* const dtPtr = DTable+1; + const HUF_DEltX4* const dt = (const HUF_DEltX4*)dtPtr; + + /* Init */ + BIT_DStream_t bitD1; + BIT_DStream_t bitD2; + BIT_DStream_t bitD3; + BIT_DStream_t bitD4; + size_t const length1 = MEM_readLE16(istart); + size_t const length2 = MEM_readLE16(istart+2); + size_t const length3 = MEM_readLE16(istart+4); + size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6); + const BYTE* const istart1 = istart + 6; /* jumpTable */ + const BYTE* const istart2 = istart1 + length1; + const BYTE* const istart3 = istart2 + length2; + const BYTE* const istart4 = istart3 + length3; + size_t const segmentSize = (dstSize+3) / 4; + BYTE* const opStart2 = ostart + segmentSize; + BYTE* const opStart3 = opStart2 + segmentSize; + BYTE* const opStart4 = opStart3 + segmentSize; + BYTE* op1 = ostart; + BYTE* op2 = opStart2; + BYTE* op3 = opStart3; + BYTE* op4 = opStart4; + U32 endSignal; + DTableDesc const dtd = HUF_getDTableDesc(DTable); + U32 const dtLog = dtd.tableLog; + + if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */ + CHECK_F( BIT_initDStream(&bitD1, istart1, length1) ); + CHECK_F( BIT_initDStream(&bitD2, istart2, length2) ); + CHECK_F( BIT_initDStream(&bitD3, istart3, length3) ); + CHECK_F( BIT_initDStream(&bitD4, istart4, length4) ); + + /* 16-32 symbols per loop (4-8 symbols per stream) */ + endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) 
| BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4); + for ( ; (endSignal==BIT_DStream_unfinished) & (op4<(oend-(sizeof(bitD4.bitContainer)-1))) ; ) { + HUF_DECODE_SYMBOLX4_2(op1, &bitD1); + HUF_DECODE_SYMBOLX4_2(op2, &bitD2); + HUF_DECODE_SYMBOLX4_2(op3, &bitD3); + HUF_DECODE_SYMBOLX4_2(op4, &bitD4); + HUF_DECODE_SYMBOLX4_1(op1, &bitD1); + HUF_DECODE_SYMBOLX4_1(op2, &bitD2); + HUF_DECODE_SYMBOLX4_1(op3, &bitD3); + HUF_DECODE_SYMBOLX4_1(op4, &bitD4); + HUF_DECODE_SYMBOLX4_2(op1, &bitD1); + HUF_DECODE_SYMBOLX4_2(op2, &bitD2); + HUF_DECODE_SYMBOLX4_2(op3, &bitD3); + HUF_DECODE_SYMBOLX4_2(op4, &bitD4); + HUF_DECODE_SYMBOLX4_0(op1, &bitD1); + HUF_DECODE_SYMBOLX4_0(op2, &bitD2); + HUF_DECODE_SYMBOLX4_0(op3, &bitD3); + HUF_DECODE_SYMBOLX4_0(op4, &bitD4); + + endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4); + } + + /* check corruption */ + if (op1 > opStart2) return ERROR(corruption_detected); + if (op2 > opStart3) return ERROR(corruption_detected); + if (op3 > opStart4) return ERROR(corruption_detected); + /* note : op4 already verified within main loop */ + + /* finish bitStreams one by one */ + HUF_decodeStreamX4(op1, &bitD1, opStart2, dt, dtLog); + HUF_decodeStreamX4(op2, &bitD2, opStart3, dt, dtLog); + HUF_decodeStreamX4(op3, &bitD3, opStart4, dt, dtLog); + HUF_decodeStreamX4(op4, &bitD4, oend, dt, dtLog); + + /* check */ + { U32 const endCheck = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4); + if (!endCheck) return ERROR(corruption_detected); } + + /* decoded size */ + return dstSize; + } +} + + +typedef size_t (*HUF_decompress_usingDTable_t)(void *dst, size_t dstSize, + const void *cSrc, + size_t cSrcSize, + const HUF_DTable *DTable); +#if DYNAMIC_BMI2 + +#define X(fn) \ + \ + static size_t fn##_default( \ + void* dst, size_t dstSize, \ + const void* cSrc, size_t cSrcSize, \ + const HUF_DTable* DTable) \ + { \ + return 
fn##_body(dst, dstSize, cSrc, cSrcSize, DTable); \ + } \ + \ + static TARGET_ATTRIBUTE("bmi2") size_t fn##_bmi2( \ + void* dst, size_t dstSize, \ + const void* cSrc, size_t cSrcSize, \ + const HUF_DTable* DTable) \ + { \ + return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable); \ + } \ + \ + static size_t fn(void* dst, size_t dstSize, void const* cSrc, \ + size_t cSrcSize, HUF_DTable const* DTable, int bmi2) \ + { \ + if (bmi2) { \ + return fn##_bmi2(dst, dstSize, cSrc, cSrcSize, DTable); \ + } \ + return fn##_default(dst, dstSize, cSrc, cSrcSize, DTable); \ + } + +#else + +#define X(fn) \ + static size_t fn(void* dst, size_t dstSize, void const* cSrc, \ + size_t cSrcSize, HUF_DTable const* DTable, int bmi2) \ + { \ + (void)bmi2; \ + return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable); \ + } + +#endif + +X(HUF_decompress1X2_usingDTable_internal) +X(HUF_decompress4X2_usingDTable_internal) +X(HUF_decompress1X4_usingDTable_internal) +X(HUF_decompress4X4_usingDTable_internal) + +#undef X + + +size_t HUF_decompress1X2_usingDTable( + void* dst, size_t dstSize, + const void* cSrc, size_t cSrcSize, + const HUF_DTable* DTable) +{ + DTableDesc dtd = HUF_getDTableDesc(DTable); + if (dtd.tableType != 0) return ERROR(GENERIC); + return HUF_decompress1X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); +} + +size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* DCtx, void* dst, size_t dstSize, + const void* cSrc, size_t cSrcSize, + void* workSpace, size_t wkspSize) +{ + const BYTE* ip = (const BYTE*) cSrc; + + size_t const hSize = HUF_readDTableX2_wksp(DCtx, cSrc, cSrcSize, workSpace, wkspSize); + if (HUF_isError(hSize)) return hSize; + if (hSize >= cSrcSize) return ERROR(srcSize_wrong); + ip += hSize; cSrcSize -= hSize; + + return HUF_decompress1X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx, /* bmi2 */ 0); +} + + +size_t HUF_decompress1X2_DCtx(HUF_DTable* DCtx, void* dst, size_t dstSize, + const void* cSrc, size_t cSrcSize) +{ + U32 
workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32]; + return HUF_decompress1X2_DCtx_wksp(DCtx, dst, dstSize, cSrc, cSrcSize, + workSpace, sizeof(workSpace)); +} + +size_t HUF_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) +{ + HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_TABLELOG_MAX); + return HUF_decompress1X2_DCtx (DTable, dst, dstSize, cSrc, cSrcSize); +} + +size_t HUF_decompress4X2_usingDTable( + void* dst, size_t dstSize, + const void* cSrc, size_t cSrcSize, + const HUF_DTable* DTable) +{ + DTableDesc dtd = HUF_getDTableDesc(DTable); + if (dtd.tableType != 0) return ERROR(GENERIC); + return HUF_decompress4X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); +} + +static size_t HUF_decompress4X2_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, + const void* cSrc, size_t cSrcSize, + void* workSpace, size_t wkspSize, int bmi2) +{ + const BYTE* ip = (const BYTE*) cSrc; + + size_t const hSize = HUF_readDTableX2_wksp (dctx, cSrc, cSrcSize, + workSpace, wkspSize); + if (HUF_isError(hSize)) return hSize; + if (hSize >= cSrcSize) return ERROR(srcSize_wrong); + ip += hSize; cSrcSize -= hSize; + + return HUF_decompress4X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2); +} + +size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, + const void* cSrc, size_t cSrcSize, + void* workSpace, size_t wkspSize) +{ + return HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, 0); +} + + +size_t HUF_decompress4X2_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) +{ + U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32]; + return HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, + workSpace, sizeof(workSpace)); +} +size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) +{ + HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_TABLELOG_MAX); + return HUF_decompress4X2_DCtx(DTable, dst, dstSize, 
cSrc, cSrcSize); +} + + +/* *************************/ +/* double-symbols decoding */ +/* *************************/ +typedef struct { BYTE symbol; BYTE weight; } sortedSymbol_t; + +/* HUF_fillDTableX4Level2() : + * `rankValOrigin` must be a table of at least (HUF_TABLELOG_MAX + 1) U32 */ +static void HUF_fillDTableX4Level2(HUF_DEltX4* DTable, U32 sizeLog, const U32 consumed, + const U32* rankValOrigin, const int minWeight, + const sortedSymbol_t* sortedSymbols, const U32 sortedListSize, + U32 nbBitsBaseline, U16 baseSeq) +{ + HUF_DEltX4 DElt; + U32 rankVal[HUF_TABLELOG_MAX + 1]; + + /* get pre-calculated rankVal */ + memcpy(rankVal, rankValOrigin, sizeof(rankVal)); + + /* fill skipped values */ + if (minWeight>1) { + U32 i, skipSize = rankVal[minWeight]; + MEM_writeLE16(&(DElt.sequence), baseSeq); + DElt.nbBits = (BYTE)(consumed); + DElt.length = 1; + for (i = 0; i < skipSize; i++) + DTable[i] = DElt; + } + + /* fill DTable */ + { U32 s; for (s=0; s= 1 */ + + rankVal[weight] += length; + } } +} + +typedef U32 rankValCol_t[HUF_TABLELOG_MAX + 1]; +typedef rankValCol_t rankVal_t[HUF_TABLELOG_MAX]; + +static void HUF_fillDTableX4(HUF_DEltX4* DTable, const U32 targetLog, + const sortedSymbol_t* sortedList, const U32 sortedListSize, + const U32* rankStart, rankVal_t rankValOrigin, const U32 maxWeight, + const U32 nbBitsBaseline) +{ + U32 rankVal[HUF_TABLELOG_MAX + 1]; + const int scaleLog = nbBitsBaseline - targetLog; /* note : targetLog >= srcLog, hence scaleLog <= 1 */ + const U32 minBits = nbBitsBaseline - maxWeight; + U32 s; + + memcpy(rankVal, rankValOrigin, sizeof(rankVal)); + + /* fill DTable */ + for (s=0; s= minBits) { /* enough room for a second symbol */ + U32 sortedRank; + int minWeight = nbBits + scaleLog; + if (minWeight < 1) minWeight = 1; + sortedRank = rankStart[minWeight]; + HUF_fillDTableX4Level2(DTable+start, targetLog-nbBits, nbBits, + rankValOrigin[nbBits], minWeight, + sortedList+sortedRank, sortedListSize-sortedRank, + nbBitsBaseline, symbol); + 
} else { + HUF_DEltX4 DElt; + MEM_writeLE16(&(DElt.sequence), symbol); + DElt.nbBits = (BYTE)(nbBits); + DElt.length = 1; + { U32 const end = start + length; + U32 u; + for (u = start; u < end; u++) DTable[u] = DElt; + } } + rankVal[weight] += length; + } +} + +size_t HUF_readDTableX4_wksp(HUF_DTable* DTable, const void* src, + size_t srcSize, void* workSpace, + size_t wkspSize) +{ + U32 tableLog, maxW, sizeOfSort, nbSymbols; + DTableDesc dtd = HUF_getDTableDesc(DTable); + U32 const maxTableLog = dtd.maxTableLog; + size_t iSize; + void* dtPtr = DTable+1; /* force compiler to avoid strict-aliasing */ + HUF_DEltX4* const dt = (HUF_DEltX4*)dtPtr; + U32 *rankStart; + + rankValCol_t* rankVal; + U32* rankStats; + U32* rankStart0; + sortedSymbol_t* sortedSymbol; + BYTE* weightList; + size_t spaceUsed32 = 0; + + rankVal = (rankValCol_t *)((U32 *)workSpace + spaceUsed32); + spaceUsed32 += (sizeof(rankValCol_t) * HUF_TABLELOG_MAX) >> 2; + rankStats = (U32 *)workSpace + spaceUsed32; + spaceUsed32 += HUF_TABLELOG_MAX + 1; + rankStart0 = (U32 *)workSpace + spaceUsed32; + spaceUsed32 += HUF_TABLELOG_MAX + 2; + sortedSymbol = (sortedSymbol_t *)workSpace + (spaceUsed32 * sizeof(U32)) / sizeof(sortedSymbol_t); + spaceUsed32 += HUF_ALIGN(sizeof(sortedSymbol_t) * (HUF_SYMBOLVALUE_MAX + 1), sizeof(U32)) >> 2; + weightList = (BYTE *)((U32 *)workSpace + spaceUsed32); + spaceUsed32 += HUF_ALIGN(HUF_SYMBOLVALUE_MAX + 1, sizeof(U32)) >> 2; + + if ((spaceUsed32 << 2) > wkspSize) return ERROR(tableLog_tooLarge); + + rankStart = rankStart0 + 1; + memset(rankStats, 0, sizeof(U32) * (2 * HUF_TABLELOG_MAX + 2 + 1)); + + HUF_STATIC_ASSERT(sizeof(HUF_DEltX4) == sizeof(HUF_DTable)); /* if compiler fails here, assertion is wrong */ + if (maxTableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge); + /* memset(weightList, 0, sizeof(weightList)); */ /* is not necessary, even though some analyzer complain ... 
*/ + + iSize = HUF_readStats(weightList, HUF_SYMBOLVALUE_MAX + 1, rankStats, &nbSymbols, &tableLog, src, srcSize); + if (HUF_isError(iSize)) return iSize; + + /* check result */ + if (tableLog > maxTableLog) return ERROR(tableLog_tooLarge); /* DTable can't fit code depth */ + + /* find maxWeight */ + for (maxW = tableLog; rankStats[maxW]==0; maxW--) {} /* necessarily finds a solution before 0 */ + + /* Get start index of each weight */ + { U32 w, nextRankStart = 0; + for (w=1; w> consumed; + } } } } + + HUF_fillDTableX4(dt, maxTableLog, + sortedSymbol, sizeOfSort, + rankStart0, rankVal, maxW, + tableLog+1); + + dtd.tableLog = (BYTE)maxTableLog; + dtd.tableType = 1; + memcpy(DTable, &dtd, sizeof(dtd)); + return iSize; +} + +size_t HUF_readDTableX4(HUF_DTable* DTable, const void* src, size_t srcSize) +{ + U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32]; + return HUF_readDTableX4_wksp(DTable, src, srcSize, + workSpace, sizeof(workSpace)); +} + +size_t HUF_decompress1X4_usingDTable( + void* dst, size_t dstSize, + const void* cSrc, size_t cSrcSize, + const HUF_DTable* DTable) +{ + DTableDesc dtd = HUF_getDTableDesc(DTable); + if (dtd.tableType != 1) return ERROR(GENERIC); + return HUF_decompress1X4_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); +} + +size_t HUF_decompress1X4_DCtx_wksp(HUF_DTable* DCtx, void* dst, size_t dstSize, + const void* cSrc, size_t cSrcSize, + void* workSpace, size_t wkspSize) +{ + const BYTE* ip = (const BYTE*) cSrc; + + size_t const hSize = HUF_readDTableX4_wksp(DCtx, cSrc, cSrcSize, + workSpace, wkspSize); + if (HUF_isError(hSize)) return hSize; + if (hSize >= cSrcSize) return ERROR(srcSize_wrong); + ip += hSize; cSrcSize -= hSize; + + return HUF_decompress1X4_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx, /* bmi2 */ 0); +} + + +size_t HUF_decompress1X4_DCtx(HUF_DTable* DCtx, void* dst, size_t dstSize, + const void* cSrc, size_t cSrcSize) +{ + U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32]; + return 
HUF_decompress1X4_DCtx_wksp(DCtx, dst, dstSize, cSrc, cSrcSize, + workSpace, sizeof(workSpace)); +} + +size_t HUF_decompress1X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) +{ + HUF_CREATE_STATIC_DTABLEX4(DTable, HUF_TABLELOG_MAX); + return HUF_decompress1X4_DCtx(DTable, dst, dstSize, cSrc, cSrcSize); +} + +size_t HUF_decompress4X4_usingDTable( + void* dst, size_t dstSize, + const void* cSrc, size_t cSrcSize, + const HUF_DTable* DTable) +{ + DTableDesc dtd = HUF_getDTableDesc(DTable); + if (dtd.tableType != 1) return ERROR(GENERIC); + return HUF_decompress4X4_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); +} + +static size_t HUF_decompress4X4_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, + const void* cSrc, size_t cSrcSize, + void* workSpace, size_t wkspSize, int bmi2) +{ + const BYTE* ip = (const BYTE*) cSrc; + + size_t hSize = HUF_readDTableX4_wksp(dctx, cSrc, cSrcSize, + workSpace, wkspSize); + if (HUF_isError(hSize)) return hSize; + if (hSize >= cSrcSize) return ERROR(srcSize_wrong); + ip += hSize; cSrcSize -= hSize; + + return HUF_decompress4X4_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2); +} + +size_t HUF_decompress4X4_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, + const void* cSrc, size_t cSrcSize, + void* workSpace, size_t wkspSize) +{ + return HUF_decompress4X4_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, /* bmi2 */ 0); +} + + +size_t HUF_decompress4X4_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, + const void* cSrc, size_t cSrcSize) +{ + U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32]; + return HUF_decompress4X4_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, + workSpace, sizeof(workSpace)); +} + +size_t HUF_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) +{ + HUF_CREATE_STATIC_DTABLEX4(DTable, HUF_TABLELOG_MAX); + return HUF_decompress4X4_DCtx(DTable, dst, dstSize, cSrc, cSrcSize); +} + + +/* 
********************************/ +/* Generic decompression selector */ +/* ********************************/ + +size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize, + const void* cSrc, size_t cSrcSize, + const HUF_DTable* DTable) +{ + DTableDesc const dtd = HUF_getDTableDesc(DTable); + return dtd.tableType ? HUF_decompress1X4_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0) : + HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); +} + +size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize, + const void* cSrc, size_t cSrcSize, + const HUF_DTable* DTable) +{ + DTableDesc const dtd = HUF_getDTableDesc(DTable); + return dtd.tableType ? HUF_decompress4X4_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0) : + HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); +} + + +typedef struct { U32 tableTime; U32 decode256Time; } algo_time_t; +static const algo_time_t algoTime[16 /* Quantization */][3 /* single, double, quad */] = +{ + /* single, double, quad */ + {{0,0}, {1,1}, {2,2}}, /* Q==0 : impossible */ + {{0,0}, {1,1}, {2,2}}, /* Q==1 : impossible */ + {{ 38,130}, {1313, 74}, {2151, 38}}, /* Q == 2 : 12-18% */ + {{ 448,128}, {1353, 74}, {2238, 41}}, /* Q == 3 : 18-25% */ + {{ 556,128}, {1353, 74}, {2238, 47}}, /* Q == 4 : 25-32% */ + {{ 714,128}, {1418, 74}, {2436, 53}}, /* Q == 5 : 32-38% */ + {{ 883,128}, {1437, 74}, {2464, 61}}, /* Q == 6 : 38-44% */ + {{ 897,128}, {1515, 75}, {2622, 68}}, /* Q == 7 : 44-50% */ + {{ 926,128}, {1613, 75}, {2730, 75}}, /* Q == 8 : 50-56% */ + {{ 947,128}, {1729, 77}, {3359, 77}}, /* Q == 9 : 56-62% */ + {{1107,128}, {2083, 81}, {4006, 84}}, /* Q ==10 : 62-69% */ + {{1177,128}, {2379, 87}, {4785, 88}}, /* Q ==11 : 69-75% */ + {{1242,128}, {2415, 93}, {5155, 84}}, /* Q ==12 : 75-81% */ + {{1349,128}, {2644,106}, {5260,106}}, /* Q ==13 : 81-87% */ + {{1455,128}, {2422,124}, 
{4174,124}}, /* Q ==14 : 87-93% */ + {{ 722,128}, {1891,145}, {1936,146}}, /* Q ==15 : 93-99% */ +}; + +/** HUF_selectDecoder() : + * Tells which decoder is likely to decode faster, + * based on a set of pre-computed metrics. + * @return : 0==HUF_decompress4X2, 1==HUF_decompress4X4 . + * Assumption : 0 < dstSize <= 128 KB */ +U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize) +{ + assert(dstSize > 0); + assert(dstSize <= 128 KB); + /* decoder timing evaluation */ + { U32 const Q = (cSrcSize >= dstSize) ? 15 : (U32)(cSrcSize * 16 / dstSize); /* Q < 16 */ + U32 const D256 = (U32)(dstSize >> 8); + U32 const DTime0 = algoTime[Q][0].tableTime + (algoTime[Q][0].decode256Time * D256); + U32 DTime1 = algoTime[Q][1].tableTime + (algoTime[Q][1].decode256Time * D256); + DTime1 += DTime1 >> 3; /* advantage to algorithm using less memory, to reduce cache eviction */ + return DTime1 < DTime0; +} } + + +typedef size_t (*decompressionAlgo)(void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); + +size_t HUF_decompress (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) +{ + static const decompressionAlgo decompress[2] = { HUF_decompress4X2, HUF_decompress4X4 }; + + /* validation checks */ + if (dstSize == 0) return ERROR(dstSize_tooSmall); + if (cSrcSize > dstSize) return ERROR(corruption_detected); /* invalid */ + if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; } /* not compressed */ + if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; } /* RLE */ + + { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize); + return decompress[algoNb](dst, dstSize, cSrc, cSrcSize); + } +} + +size_t HUF_decompress4X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) +{ + /* validation checks */ + if (dstSize == 0) return ERROR(dstSize_tooSmall); + if (cSrcSize > dstSize) return ERROR(corruption_detected); /* invalid */ + if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return 
dstSize; } /* not compressed */ + if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; } /* RLE */ + + { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize); + return algoNb ? HUF_decompress4X4_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) : + HUF_decompress4X2_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) ; + } +} + +size_t HUF_decompress4X_hufOnly(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) +{ + U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32]; + return HUF_decompress4X_hufOnly_wksp(dctx, dst, dstSize, cSrc, cSrcSize, + workSpace, sizeof(workSpace)); +} + + +size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst, + size_t dstSize, const void* cSrc, + size_t cSrcSize, void* workSpace, + size_t wkspSize) +{ + /* validation checks */ + if (dstSize == 0) return ERROR(dstSize_tooSmall); + if (cSrcSize == 0) return ERROR(corruption_detected); + + { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize); + return algoNb ? HUF_decompress4X4_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize): + HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize); + } +} + +size_t HUF_decompress1X_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, + const void* cSrc, size_t cSrcSize, + void* workSpace, size_t wkspSize) +{ + /* validation checks */ + if (dstSize == 0) return ERROR(dstSize_tooSmall); + if (cSrcSize > dstSize) return ERROR(corruption_detected); /* invalid */ + if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; } /* not compressed */ + if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; } /* RLE */ + + { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize); + return algoNb ? 
HUF_decompress1X4_DCtx_wksp(dctx, dst, dstSize, cSrc, + cSrcSize, workSpace, wkspSize): + HUF_decompress1X2_DCtx_wksp(dctx, dst, dstSize, cSrc, + cSrcSize, workSpace, wkspSize); + } +} + +size_t HUF_decompress1X_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, + const void* cSrc, size_t cSrcSize) +{ + U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32]; + return HUF_decompress1X_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, + workSpace, sizeof(workSpace)); +} + + +size_t HUF_decompress1X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2) +{ + DTableDesc const dtd = HUF_getDTableDesc(DTable); + return dtd.tableType ? HUF_decompress1X4_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2) : + HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2); +} + +size_t HUF_decompress1X2_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2) +{ + const BYTE* ip = (const BYTE*) cSrc; + + size_t const hSize = HUF_readDTableX2_wksp(dctx, cSrc, cSrcSize, workSpace, wkspSize); + if (HUF_isError(hSize)) return hSize; + if (hSize >= cSrcSize) return ERROR(srcSize_wrong); + ip += hSize; cSrcSize -= hSize; + + return HUF_decompress1X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2); +} + +size_t HUF_decompress4X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2) +{ + DTableDesc const dtd = HUF_getDTableDesc(DTable); + return dtd.tableType ? 
HUF_decompress4X4_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2) : + HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2); +} + +size_t HUF_decompress4X_hufOnly_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2) +{ + /* validation checks */ + if (dstSize == 0) return ERROR(dstSize_tooSmall); + if (cSrcSize == 0) return ERROR(corruption_detected); + + { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize); + return algoNb ? HUF_decompress4X4_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2) : + HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2); + } +} diff --git a/vendor/github.com/DataDog/zstd/mem.h b/vendor/github.com/DataDog/zstd/mem.h new file mode 100644 index 00000000000..47d2300177c --- /dev/null +++ b/vendor/github.com/DataDog/zstd/mem.h @@ -0,0 +1,362 @@ +/* + * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. 
+ */ + +#ifndef MEM_H_MODULE +#define MEM_H_MODULE + +#if defined (__cplusplus) +extern "C" { +#endif + +/*-**************************************** +* Dependencies +******************************************/ +#include /* size_t, ptrdiff_t */ +#include /* memcpy */ + + +/*-**************************************** +* Compiler specifics +******************************************/ +#if defined(_MSC_VER) /* Visual Studio */ +# include /* _byteswap_ulong */ +# include /* _byteswap_* */ +#endif +#if defined(__GNUC__) +# define MEM_STATIC static __inline __attribute__((unused)) +#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) +# define MEM_STATIC static inline +#elif defined(_MSC_VER) +# define MEM_STATIC static __inline +#else +# define MEM_STATIC static /* this version may generate warnings for unused static functions; disable the relevant warning */ +#endif + +/* code only tested on 32 and 64 bits systems */ +#define MEM_STATIC_ASSERT(c) { enum { MEM_static_assert = 1/(int)(!!(c)) }; } +MEM_STATIC void MEM_check(void) { MEM_STATIC_ASSERT((sizeof(size_t)==4) || (sizeof(size_t)==8)); } + + +/*-************************************************************** +* Basic Types +*****************************************************************/ +#if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) +# include + typedef uint8_t BYTE; + typedef uint16_t U16; + typedef int16_t S16; + typedef uint32_t U32; + typedef int32_t S32; + typedef uint64_t U64; + typedef int64_t S64; +#else + typedef unsigned char BYTE; + typedef unsigned short U16; + typedef signed short S16; + typedef unsigned int U32; + typedef signed int S32; + typedef unsigned long long U64; + typedef signed long long S64; +#endif + + +/*-************************************************************** +* Memory I/O +*****************************************************************/ +/* 
MEM_FORCE_MEMORY_ACCESS : + * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable. + * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal. + * The below switch allow to select different access method for improved performance. + * Method 0 (default) : use `memcpy()`. Safe and portable. + * Method 1 : `__packed` statement. It depends on compiler extension (i.e., not portable). + * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`. + * Method 2 : direct access. This method is portable but violate C standard. + * It can generate buggy code on targets depending on alignment. + * In some circumstances, it's the only known way to get the most performance (i.e. GCC + ARMv6) + * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details. + * Prefer these methods in priority order (0 > 1 > 2) + */ +#ifndef MEM_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */ +# if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) ) +# define MEM_FORCE_MEMORY_ACCESS 2 +# elif defined(__INTEL_COMPILER) || defined(__GNUC__) +# define MEM_FORCE_MEMORY_ACCESS 1 +# endif +#endif + +MEM_STATIC unsigned MEM_32bits(void) { return sizeof(size_t)==4; } +MEM_STATIC unsigned MEM_64bits(void) { return sizeof(size_t)==8; } + +MEM_STATIC unsigned MEM_isLittleEndian(void) +{ + const union { U32 u; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */ + return one.c[0]; +} + +#if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2) + +/* violates C standard, by lying on structure alignment. 
+Only use if no other choice to achieve best performance on target platform */ +MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; } +MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; } +MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; } +MEM_STATIC size_t MEM_readST(const void* memPtr) { return *(const size_t*) memPtr; } + +MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; } +MEM_STATIC void MEM_write32(void* memPtr, U32 value) { *(U32*)memPtr = value; } +MEM_STATIC void MEM_write64(void* memPtr, U64 value) { *(U64*)memPtr = value; } + +#elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1) + +/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ +/* currently only defined for gcc and icc */ +#if defined(_MSC_VER) || (defined(__INTEL_COMPILER) && defined(WIN32)) + __pragma( pack(push, 1) ) + typedef struct { U16 v; } unalign16; + typedef struct { U32 v; } unalign32; + typedef struct { U64 v; } unalign64; + typedef struct { size_t v; } unalignArch; + __pragma( pack(pop) ) +#else + typedef struct { U16 v; } __attribute__((packed)) unalign16; + typedef struct { U32 v; } __attribute__((packed)) unalign32; + typedef struct { U64 v; } __attribute__((packed)) unalign64; + typedef struct { size_t v; } __attribute__((packed)) unalignArch; +#endif + +MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign16*)ptr)->v; } +MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign32*)ptr)->v; } +MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign64*)ptr)->v; } +MEM_STATIC size_t MEM_readST(const void* ptr) { return ((const unalignArch*)ptr)->v; } + +MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign16*)memPtr)->v = value; } +MEM_STATIC void MEM_write32(void* memPtr, U32 value) { ((unalign32*)memPtr)->v = value; } +MEM_STATIC void MEM_write64(void* 
memPtr, U64 value) { ((unalign64*)memPtr)->v = value; } + +#else + +/* default method, safe and standard. + can sometimes prove slower */ + +MEM_STATIC U16 MEM_read16(const void* memPtr) +{ + U16 val; memcpy(&val, memPtr, sizeof(val)); return val; +} + +MEM_STATIC U32 MEM_read32(const void* memPtr) +{ + U32 val; memcpy(&val, memPtr, sizeof(val)); return val; +} + +MEM_STATIC U64 MEM_read64(const void* memPtr) +{ + U64 val; memcpy(&val, memPtr, sizeof(val)); return val; +} + +MEM_STATIC size_t MEM_readST(const void* memPtr) +{ + size_t val; memcpy(&val, memPtr, sizeof(val)); return val; +} + +MEM_STATIC void MEM_write16(void* memPtr, U16 value) +{ + memcpy(memPtr, &value, sizeof(value)); +} + +MEM_STATIC void MEM_write32(void* memPtr, U32 value) +{ + memcpy(memPtr, &value, sizeof(value)); +} + +MEM_STATIC void MEM_write64(void* memPtr, U64 value) +{ + memcpy(memPtr, &value, sizeof(value)); +} + +#endif /* MEM_FORCE_MEMORY_ACCESS */ + +MEM_STATIC U32 MEM_swap32(U32 in) +{ +#if defined(_MSC_VER) /* Visual Studio */ + return _byteswap_ulong(in); +#elif defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403) + return __builtin_bswap32(in); +#else + return ((in << 24) & 0xff000000 ) | + ((in << 8) & 0x00ff0000 ) | + ((in >> 8) & 0x0000ff00 ) | + ((in >> 24) & 0x000000ff ); +#endif +} + +MEM_STATIC U64 MEM_swap64(U64 in) +{ +#if defined(_MSC_VER) /* Visual Studio */ + return _byteswap_uint64(in); +#elif defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403) + return __builtin_bswap64(in); +#else + return ((in << 56) & 0xff00000000000000ULL) | + ((in << 40) & 0x00ff000000000000ULL) | + ((in << 24) & 0x0000ff0000000000ULL) | + ((in << 8) & 0x000000ff00000000ULL) | + ((in >> 8) & 0x00000000ff000000ULL) | + ((in >> 24) & 0x0000000000ff0000ULL) | + ((in >> 40) & 0x000000000000ff00ULL) | + ((in >> 56) & 0x00000000000000ffULL); +#endif +} + +MEM_STATIC size_t MEM_swapST(size_t in) +{ + if (MEM_32bits()) + return (size_t)MEM_swap32((U32)in); + else + return 
(size_t)MEM_swap64((U64)in); +} + +/*=== Little endian r/w ===*/ + +MEM_STATIC U16 MEM_readLE16(const void* memPtr) +{ + if (MEM_isLittleEndian()) + return MEM_read16(memPtr); + else { + const BYTE* p = (const BYTE*)memPtr; + return (U16)(p[0] + (p[1]<<8)); + } +} + +MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val) +{ + if (MEM_isLittleEndian()) { + MEM_write16(memPtr, val); + } else { + BYTE* p = (BYTE*)memPtr; + p[0] = (BYTE)val; + p[1] = (BYTE)(val>>8); + } +} + +MEM_STATIC U32 MEM_readLE24(const void* memPtr) +{ + return MEM_readLE16(memPtr) + (((const BYTE*)memPtr)[2] << 16); +} + +MEM_STATIC void MEM_writeLE24(void* memPtr, U32 val) +{ + MEM_writeLE16(memPtr, (U16)val); + ((BYTE*)memPtr)[2] = (BYTE)(val>>16); +} + +MEM_STATIC U32 MEM_readLE32(const void* memPtr) +{ + if (MEM_isLittleEndian()) + return MEM_read32(memPtr); + else + return MEM_swap32(MEM_read32(memPtr)); +} + +MEM_STATIC void MEM_writeLE32(void* memPtr, U32 val32) +{ + if (MEM_isLittleEndian()) + MEM_write32(memPtr, val32); + else + MEM_write32(memPtr, MEM_swap32(val32)); +} + +MEM_STATIC U64 MEM_readLE64(const void* memPtr) +{ + if (MEM_isLittleEndian()) + return MEM_read64(memPtr); + else + return MEM_swap64(MEM_read64(memPtr)); +} + +MEM_STATIC void MEM_writeLE64(void* memPtr, U64 val64) +{ + if (MEM_isLittleEndian()) + MEM_write64(memPtr, val64); + else + MEM_write64(memPtr, MEM_swap64(val64)); +} + +MEM_STATIC size_t MEM_readLEST(const void* memPtr) +{ + if (MEM_32bits()) + return (size_t)MEM_readLE32(memPtr); + else + return (size_t)MEM_readLE64(memPtr); +} + +MEM_STATIC void MEM_writeLEST(void* memPtr, size_t val) +{ + if (MEM_32bits()) + MEM_writeLE32(memPtr, (U32)val); + else + MEM_writeLE64(memPtr, (U64)val); +} + +/*=== Big endian r/w ===*/ + +MEM_STATIC U32 MEM_readBE32(const void* memPtr) +{ + if (MEM_isLittleEndian()) + return MEM_swap32(MEM_read32(memPtr)); + else + return MEM_read32(memPtr); +} + +MEM_STATIC void MEM_writeBE32(void* memPtr, U32 val32) +{ + if 
(MEM_isLittleEndian()) + MEM_write32(memPtr, MEM_swap32(val32)); + else + MEM_write32(memPtr, val32); +} + +MEM_STATIC U64 MEM_readBE64(const void* memPtr) +{ + if (MEM_isLittleEndian()) + return MEM_swap64(MEM_read64(memPtr)); + else + return MEM_read64(memPtr); +} + +MEM_STATIC void MEM_writeBE64(void* memPtr, U64 val64) +{ + if (MEM_isLittleEndian()) + MEM_write64(memPtr, MEM_swap64(val64)); + else + MEM_write64(memPtr, val64); +} + +MEM_STATIC size_t MEM_readBEST(const void* memPtr) +{ + if (MEM_32bits()) + return (size_t)MEM_readBE32(memPtr); + else + return (size_t)MEM_readBE64(memPtr); +} + +MEM_STATIC void MEM_writeBEST(void* memPtr, size_t val) +{ + if (MEM_32bits()) + MEM_writeBE32(memPtr, (U32)val); + else + MEM_writeBE64(memPtr, (U64)val); +} + + +#if defined (__cplusplus) +} +#endif + +#endif /* MEM_H_MODULE */ diff --git a/vendor/github.com/DataDog/zstd/pool.c b/vendor/github.com/DataDog/zstd/pool.c new file mode 100644 index 00000000000..773488b0725 --- /dev/null +++ b/vendor/github.com/DataDog/zstd/pool.c @@ -0,0 +1,283 @@ +/* + * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. 
+ */ + + +/* ====== Dependencies ======= */ +#include /* size_t */ +#include "pool.h" +#include "zstd_internal.h" /* ZSTD_malloc, ZSTD_free */ + +/* ====== Compiler specifics ====== */ +#if defined(_MSC_VER) +# pragma warning(disable : 4204) /* disable: C4204: non-constant aggregate initializer */ +#endif + + +#ifdef ZSTD_MULTITHREAD + +#include "threading.h" /* pthread adaptation */ + +/* A job is a function and an opaque argument */ +typedef struct POOL_job_s { + POOL_function function; + void *opaque; +} POOL_job; + +struct POOL_ctx_s { + ZSTD_customMem customMem; + /* Keep track of the threads */ + ZSTD_pthread_t *threads; + size_t numThreads; + + /* The queue is a circular buffer */ + POOL_job *queue; + size_t queueHead; + size_t queueTail; + size_t queueSize; + + /* The number of threads working on jobs */ + size_t numThreadsBusy; + /* Indicates if the queue is empty */ + int queueEmpty; + + /* The mutex protects the queue */ + ZSTD_pthread_mutex_t queueMutex; + /* Condition variable for pushers to wait on when the queue is full */ + ZSTD_pthread_cond_t queuePushCond; + /* Condition variables for poppers to wait on when the queue is empty */ + ZSTD_pthread_cond_t queuePopCond; + /* Indicates if the queue is shutting down */ + int shutdown; +}; + +/* POOL_thread() : + Work thread for the thread pool. + Waits for jobs and executes them. + @returns : NULL on failure else non-null. 
+*/ +static void* POOL_thread(void* opaque) { + POOL_ctx* const ctx = (POOL_ctx*)opaque; + if (!ctx) { return NULL; } + for (;;) { + /* Lock the mutex and wait for a non-empty queue or until shutdown */ + ZSTD_pthread_mutex_lock(&ctx->queueMutex); + + while (ctx->queueEmpty && !ctx->shutdown) { + ZSTD_pthread_cond_wait(&ctx->queuePopCond, &ctx->queueMutex); + } + /* empty => shutting down: so stop */ + if (ctx->queueEmpty) { + ZSTD_pthread_mutex_unlock(&ctx->queueMutex); + return opaque; + } + /* Pop a job off the queue */ + { POOL_job const job = ctx->queue[ctx->queueHead]; + ctx->queueHead = (ctx->queueHead + 1) % ctx->queueSize; + ctx->numThreadsBusy++; + ctx->queueEmpty = ctx->queueHead == ctx->queueTail; + /* Unlock the mutex, signal a pusher, and run the job */ + ZSTD_pthread_mutex_unlock(&ctx->queueMutex); + ZSTD_pthread_cond_signal(&ctx->queuePushCond); + + job.function(job.opaque); + + /* If the intended queue size was 0, signal after finishing job */ + if (ctx->queueSize == 1) { + ZSTD_pthread_mutex_lock(&ctx->queueMutex); + ctx->numThreadsBusy--; + ZSTD_pthread_mutex_unlock(&ctx->queueMutex); + ZSTD_pthread_cond_signal(&ctx->queuePushCond); + } } + } /* for (;;) */ + /* Unreachable */ +} + +POOL_ctx* POOL_create(size_t numThreads, size_t queueSize) { + return POOL_create_advanced(numThreads, queueSize, ZSTD_defaultCMem); +} + +POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize, ZSTD_customMem customMem) { + POOL_ctx* ctx; + /* Check the parameters */ + if (!numThreads) { return NULL; } + /* Allocate the context and zero initialize */ + ctx = (POOL_ctx*)ZSTD_calloc(sizeof(POOL_ctx), customMem); + if (!ctx) { return NULL; } + /* Initialize the job queue. + * It needs one extra space since one space is wasted to differentiate empty + * and full queues. 
+ */ + ctx->queueSize = queueSize + 1; + ctx->queue = (POOL_job*)ZSTD_malloc(ctx->queueSize * sizeof(POOL_job), customMem); + ctx->queueHead = 0; + ctx->queueTail = 0; + ctx->numThreadsBusy = 0; + ctx->queueEmpty = 1; + (void)ZSTD_pthread_mutex_init(&ctx->queueMutex, NULL); + (void)ZSTD_pthread_cond_init(&ctx->queuePushCond, NULL); + (void)ZSTD_pthread_cond_init(&ctx->queuePopCond, NULL); + ctx->shutdown = 0; + /* Allocate space for the thread handles */ + ctx->threads = (ZSTD_pthread_t*)ZSTD_malloc(numThreads * sizeof(ZSTD_pthread_t), customMem); + ctx->numThreads = 0; + ctx->customMem = customMem; + /* Check for errors */ + if (!ctx->threads || !ctx->queue) { POOL_free(ctx); return NULL; } + /* Initialize the threads */ + { size_t i; + for (i = 0; i < numThreads; ++i) { + if (ZSTD_pthread_create(&ctx->threads[i], NULL, &POOL_thread, ctx)) { + ctx->numThreads = i; + POOL_free(ctx); + return NULL; + } } + ctx->numThreads = numThreads; + } + return ctx; +} + +/*! POOL_join() : + Shutdown the queue, wake any sleeping threads, and join all of the threads. 
+*/ +static void POOL_join(POOL_ctx* ctx) { + /* Shut down the queue */ + ZSTD_pthread_mutex_lock(&ctx->queueMutex); + ctx->shutdown = 1; + ZSTD_pthread_mutex_unlock(&ctx->queueMutex); + /* Wake up sleeping threads */ + ZSTD_pthread_cond_broadcast(&ctx->queuePushCond); + ZSTD_pthread_cond_broadcast(&ctx->queuePopCond); + /* Join all of the threads */ + { size_t i; + for (i = 0; i < ctx->numThreads; ++i) { + ZSTD_pthread_join(ctx->threads[i], NULL); + } } +} + +void POOL_free(POOL_ctx *ctx) { + if (!ctx) { return; } + POOL_join(ctx); + ZSTD_pthread_mutex_destroy(&ctx->queueMutex); + ZSTD_pthread_cond_destroy(&ctx->queuePushCond); + ZSTD_pthread_cond_destroy(&ctx->queuePopCond); + ZSTD_free(ctx->queue, ctx->customMem); + ZSTD_free(ctx->threads, ctx->customMem); + ZSTD_free(ctx, ctx->customMem); +} + +size_t POOL_sizeof(POOL_ctx *ctx) { + if (ctx==NULL) return 0; /* supports sizeof NULL */ + return sizeof(*ctx) + + ctx->queueSize * sizeof(POOL_job) + + ctx->numThreads * sizeof(ZSTD_pthread_t); +} + +/** + * Returns 1 if the queue is full and 0 otherwise. + * + * If the queueSize is 1 (the pool was created with an intended queueSize of 0), + * then a queue is empty if there is a thread free and no job is waiting. 
+ */ +static int isQueueFull(POOL_ctx const* ctx) { + if (ctx->queueSize > 1) { + return ctx->queueHead == ((ctx->queueTail + 1) % ctx->queueSize); + } else { + return ctx->numThreadsBusy == ctx->numThreads || + !ctx->queueEmpty; + } +} + + +static void POOL_add_internal(POOL_ctx* ctx, POOL_function function, void *opaque) +{ + POOL_job const job = {function, opaque}; + assert(ctx != NULL); + if (ctx->shutdown) return; + + ctx->queueEmpty = 0; + ctx->queue[ctx->queueTail] = job; + ctx->queueTail = (ctx->queueTail + 1) % ctx->queueSize; + ZSTD_pthread_cond_signal(&ctx->queuePopCond); +} + +void POOL_add(POOL_ctx* ctx, POOL_function function, void* opaque) +{ + assert(ctx != NULL); + ZSTD_pthread_mutex_lock(&ctx->queueMutex); + /* Wait until there is space in the queue for the new job */ + while (isQueueFull(ctx) && (!ctx->shutdown)) { + ZSTD_pthread_cond_wait(&ctx->queuePushCond, &ctx->queueMutex); + } + POOL_add_internal(ctx, function, opaque); + ZSTD_pthread_mutex_unlock(&ctx->queueMutex); +} + + +int POOL_tryAdd(POOL_ctx* ctx, POOL_function function, void* opaque) +{ + assert(ctx != NULL); + ZSTD_pthread_mutex_lock(&ctx->queueMutex); + if (isQueueFull(ctx)) { + ZSTD_pthread_mutex_unlock(&ctx->queueMutex); + return 0; + } + POOL_add_internal(ctx, function, opaque); + ZSTD_pthread_mutex_unlock(&ctx->queueMutex); + return 1; +} + + +#else /* ZSTD_MULTITHREAD not defined */ + +/* ========================== */ +/* No multi-threading support */ +/* ========================== */ + + +/* We don't need any data, but if it is empty, malloc() might return NULL. 
*/ +struct POOL_ctx_s { + int dummy; +}; +static POOL_ctx g_ctx; + +POOL_ctx* POOL_create(size_t numThreads, size_t queueSize) { + return POOL_create_advanced(numThreads, queueSize, ZSTD_defaultCMem); +} + +POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize, ZSTD_customMem customMem) { + (void)numThreads; + (void)queueSize; + (void)customMem; + return &g_ctx; +} + +void POOL_free(POOL_ctx* ctx) { + assert(!ctx || ctx == &g_ctx); + (void)ctx; +} + +void POOL_add(POOL_ctx* ctx, POOL_function function, void* opaque) { + (void)ctx; + function(opaque); +} + +int POOL_tryAdd(POOL_ctx* ctx, POOL_function function, void* opaque) { + (void)ctx; + function(opaque); + return 1; +} + +size_t POOL_sizeof(POOL_ctx* ctx) { + if (ctx==NULL) return 0; /* supports sizeof NULL */ + assert(ctx == &g_ctx); + return sizeof(*ctx); +} + +#endif /* ZSTD_MULTITHREAD */ diff --git a/vendor/github.com/DataDog/zstd/pool.h b/vendor/github.com/DataDog/zstd/pool.h new file mode 100644 index 00000000000..a57e9b4fabc --- /dev/null +++ b/vendor/github.com/DataDog/zstd/pool.h @@ -0,0 +1,74 @@ +/* + * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + +#ifndef POOL_H +#define POOL_H + +#if defined (__cplusplus) +extern "C" { +#endif + + +#include /* size_t */ +#define ZSTD_STATIC_LINKING_ONLY /* ZSTD_customMem */ +#include "zstd.h" + +typedef struct POOL_ctx_s POOL_ctx; + +/*! POOL_create() : + * Create a thread pool with at most `numThreads` threads. + * `numThreads` must be at least 1. + * The maximum number of queued jobs before blocking is `queueSize`. + * @return : POOL_ctx pointer on success, else NULL. 
+*/ +POOL_ctx* POOL_create(size_t numThreads, size_t queueSize); + +POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize, ZSTD_customMem customMem); + +/*! POOL_free() : + Free a thread pool returned by POOL_create(). +*/ +void POOL_free(POOL_ctx* ctx); + +/*! POOL_sizeof() : + return memory usage of pool returned by POOL_create(). +*/ +size_t POOL_sizeof(POOL_ctx* ctx); + +/*! POOL_function : + The function type that can be added to a thread pool. +*/ +typedef void (*POOL_function)(void*); +/*! POOL_add_function : + The function type for a generic thread pool add function. +*/ +typedef void (*POOL_add_function)(void*, POOL_function, void*); + +/*! POOL_add() : + Add the job `function(opaque)` to the thread pool. `ctx` must be valid. + Possibly blocks until there is room in the queue. + Note : The function may be executed asynchronously, so `opaque` must live until the function has been completed. +*/ +void POOL_add(POOL_ctx* ctx, POOL_function function, void* opaque); + + +/*! POOL_tryAdd() : + Add the job `function(opaque)` to the thread pool if a worker is available. + return immediately otherwise. + @return : 1 if successful, 0 if not. +*/ +int POOL_tryAdd(POOL_ctx* ctx, POOL_function function, void* opaque); + + +#if defined (__cplusplus) +} +#endif + +#endif diff --git a/vendor/github.com/DataDog/zstd/threading.c b/vendor/github.com/DataDog/zstd/threading.c new file mode 100644 index 00000000000..8be8c8da948 --- /dev/null +++ b/vendor/github.com/DataDog/zstd/threading.c @@ -0,0 +1,75 @@ +/** + * Copyright (c) 2016 Tino Reichardt + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). 
+ * + * You can contact the author at: + * - zstdmt source repository: https://github.com/mcmilk/zstdmt + */ + +/** + * This file will hold wrapper for systems, which do not support pthreads + */ + +/* create fake symbol to avoid empty trnaslation unit warning */ +int g_ZSTD_threading_useles_symbol; + +#if defined(ZSTD_MULTITHREAD) && defined(_WIN32) + +/** + * Windows minimalist Pthread Wrapper, based on : + * http://www.cse.wustl.edu/~schmidt/win32-cv-1.html + */ + + +/* === Dependencies === */ +#include +#include +#include "threading.h" + + +/* === Implementation === */ + +static unsigned __stdcall worker(void *arg) +{ + ZSTD_pthread_t* const thread = (ZSTD_pthread_t*) arg; + thread->arg = thread->start_routine(thread->arg); + return 0; +} + +int ZSTD_pthread_create(ZSTD_pthread_t* thread, const void* unused, + void* (*start_routine) (void*), void* arg) +{ + (void)unused; + thread->arg = arg; + thread->start_routine = start_routine; + thread->handle = (HANDLE) _beginthreadex(NULL, 0, worker, thread, 0, NULL); + + if (!thread->handle) + return errno; + else + return 0; +} + +int ZSTD_pthread_join(ZSTD_pthread_t thread, void **value_ptr) +{ + DWORD result; + + if (!thread.handle) return 0; + + result = WaitForSingleObject(thread.handle, INFINITE); + switch (result) { + case WAIT_OBJECT_0: + if (value_ptr) *value_ptr = thread.arg; + return 0; + case WAIT_ABANDONED: + return EINVAL; + default: + return GetLastError(); + } +} + +#endif /* ZSTD_MULTITHREAD */ diff --git a/vendor/github.com/DataDog/zstd/threading.h b/vendor/github.com/DataDog/zstd/threading.h new file mode 100644 index 00000000000..d806c89d01c --- /dev/null +++ b/vendor/github.com/DataDog/zstd/threading.h @@ -0,0 +1,123 @@ +/** + * Copyright (c) 2016 Tino Reichardt + * All rights reserved. 
+ * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * + * You can contact the author at: + * - zstdmt source repository: https://github.com/mcmilk/zstdmt + */ + +#ifndef THREADING_H_938743 +#define THREADING_H_938743 + +#if defined (__cplusplus) +extern "C" { +#endif + +#if defined(ZSTD_MULTITHREAD) && defined(_WIN32) + +/** + * Windows minimalist Pthread Wrapper, based on : + * http://www.cse.wustl.edu/~schmidt/win32-cv-1.html + */ +#ifdef WINVER +# undef WINVER +#endif +#define WINVER 0x0600 + +#ifdef _WIN32_WINNT +# undef _WIN32_WINNT +#endif +#define _WIN32_WINNT 0x0600 + +#ifndef WIN32_LEAN_AND_MEAN +# define WIN32_LEAN_AND_MEAN +#endif + +#undef ERROR /* reported already defined on VS 2015 (Rich Geldreich) */ +#include +#undef ERROR +#define ERROR(name) ZSTD_ERROR(name) + + +/* mutex */ +#define ZSTD_pthread_mutex_t CRITICAL_SECTION +#define ZSTD_pthread_mutex_init(a, b) ((void)(b), InitializeCriticalSection((a)), 0) +#define ZSTD_pthread_mutex_destroy(a) DeleteCriticalSection((a)) +#define ZSTD_pthread_mutex_lock(a) EnterCriticalSection((a)) +#define ZSTD_pthread_mutex_unlock(a) LeaveCriticalSection((a)) + +/* condition variable */ +#define ZSTD_pthread_cond_t CONDITION_VARIABLE +#define ZSTD_pthread_cond_init(a, b) ((void)(b), InitializeConditionVariable((a)), 0) +#define ZSTD_pthread_cond_destroy(a) ((void)(a)) +#define ZSTD_pthread_cond_wait(a, b) SleepConditionVariableCS((a), (b), INFINITE) +#define ZSTD_pthread_cond_signal(a) WakeConditionVariable((a)) +#define ZSTD_pthread_cond_broadcast(a) WakeAllConditionVariable((a)) + +/* ZSTD_pthread_create() and ZSTD_pthread_join() */ +typedef struct { + HANDLE handle; + void* (*start_routine)(void*); + void* arg; +} ZSTD_pthread_t; + +int ZSTD_pthread_create(ZSTD_pthread_t* thread, const void* unused, + void* 
(*start_routine) (void*), void* arg); + +int ZSTD_pthread_join(ZSTD_pthread_t thread, void** value_ptr); + +/** + * add here more wrappers as required + */ + + +#elif defined(ZSTD_MULTITHREAD) /* posix assumed ; need a better detection method */ +/* === POSIX Systems === */ +# include + +#define ZSTD_pthread_mutex_t pthread_mutex_t +#define ZSTD_pthread_mutex_init(a, b) pthread_mutex_init((a), (b)) +#define ZSTD_pthread_mutex_destroy(a) pthread_mutex_destroy((a)) +#define ZSTD_pthread_mutex_lock(a) pthread_mutex_lock((a)) +#define ZSTD_pthread_mutex_unlock(a) pthread_mutex_unlock((a)) + +#define ZSTD_pthread_cond_t pthread_cond_t +#define ZSTD_pthread_cond_init(a, b) pthread_cond_init((a), (b)) +#define ZSTD_pthread_cond_destroy(a) pthread_cond_destroy((a)) +#define ZSTD_pthread_cond_wait(a, b) pthread_cond_wait((a), (b)) +#define ZSTD_pthread_cond_signal(a) pthread_cond_signal((a)) +#define ZSTD_pthread_cond_broadcast(a) pthread_cond_broadcast((a)) + +#define ZSTD_pthread_t pthread_t +#define ZSTD_pthread_create(a, b, c, d) pthread_create((a), (b), (c), (d)) +#define ZSTD_pthread_join(a, b) pthread_join((a),(b)) + +#else /* ZSTD_MULTITHREAD not defined */ +/* No multithreading support */ + +typedef int ZSTD_pthread_mutex_t; +#define ZSTD_pthread_mutex_init(a, b) ((void)(a), (void)(b), 0) +#define ZSTD_pthread_mutex_destroy(a) ((void)(a)) +#define ZSTD_pthread_mutex_lock(a) ((void)(a)) +#define ZSTD_pthread_mutex_unlock(a) ((void)(a)) + +typedef int ZSTD_pthread_cond_t; +#define ZSTD_pthread_cond_init(a, b) ((void)(a), (void)(b), 0) +#define ZSTD_pthread_cond_destroy(a) ((void)(a)) +#define ZSTD_pthread_cond_wait(a, b) ((void)(a), (void)(b)) +#define ZSTD_pthread_cond_signal(a) ((void)(a)) +#define ZSTD_pthread_cond_broadcast(a) ((void)(a)) + +/* do not use ZSTD_pthread_t */ + +#endif /* ZSTD_MULTITHREAD */ + +#if defined (__cplusplus) +} +#endif + +#endif /* THREADING_H_938743 */ diff --git a/vendor/github.com/DataDog/zstd/travis_test_32.sh 
b/vendor/github.com/DataDog/zstd/travis_test_32.sh new file mode 100644 index 00000000000..d29c86c9eb4 --- /dev/null +++ b/vendor/github.com/DataDog/zstd/travis_test_32.sh @@ -0,0 +1,18 @@ +#!/bin/bash +# Get utilities +yum -y -q -e 0 install wget tar unzip gcc + +# Get Go +wget -q https://dl.google.com/go/go1.11.1.linux-386.tar.gz +tar -C /usr/local -xzf go1.11.1.linux-386.tar.gz +export PATH=$PATH:/usr/local/go/bin + +# Get payload +wget -q https://github.com/DataDog/zstd/files/2246767/mr.zip +unzip mr.zip + +# Build and run tests +cd zstd +go build +PAYLOAD=$(pwd)/mr go test -v +PAYLOAD=$(pwd)/mr go test -bench . diff --git a/vendor/github.com/DataDog/zstd/update.txt b/vendor/github.com/DataDog/zstd/update.txt new file mode 100644 index 00000000000..1de939f76e5 --- /dev/null +++ b/vendor/github.com/DataDog/zstd/update.txt @@ -0,0 +1,56 @@ +./lib/common/bitstream.h +./lib/common/compiler.h +./lib/compress/zstd_compress_internal.h +./lib/compress/zstd_fast.h +./lib/compress/zstd_double_fast.h +./lib/compress/zstd_lazy.h +./lib/compress/zstd_ldm.h +./lib/dictBuilder/cover.c +./lib/dictBuilder/divsufsort.c +./lib/dictBuilder/divsufsort.h +./lib/common/entropy_common.c +./lib/common/error_private.c +./lib/common/error_private.h +./lib/compress/fse_compress.c +./lib/common/fse_decompress.c +./lib/common/fse.h +./lib/compress/huf_compress.c +./lib/decompress/huf_decompress.c +./lib/common/huf.h +./lib/common/mem.h +./lib/common/pool.c +./lib/common/pool.h +./lib/common/threading.c +./lib/common/threading.h +./lib/common/xxhash.c +./lib/common/xxhash.h +./lib/deprecated/zbuff_common.c +./lib/deprecated/zbuff_compress.c +./lib/deprecated/zbuff_decompress.c +./lib/deprecated/zbuff.h +./lib/dictBuilder/zdict.c +./lib/dictBuilder/zdict.h +./lib/common/zstd_common.c +./lib/compress/zstd_compress.c +./lib/decompress/zstd_decompress.c +./lib/common/zstd_errors.h +./lib/zstd.h +./lib/common/zstd_internal.h 
+./lib/legacy/zstd_legacy.h +./lib/compress/zstd_opt.c +./lib/compress/zstd_opt.h +./lib/legacy/zstd_v01.c +./lib/legacy/zstd_v01.h +./lib/legacy/zstd_v02.c +./lib/legacy/zstd_v02.h +./lib/legacy/zstd_v03.c +./lib/legacy/zstd_v03.h +./lib/legacy/zstd_v04.c +./lib/legacy/zstd_v04.h +./lib/legacy/zstd_v05.c +./lib/legacy/zstd_v05.h +./lib/legacy/zstd_v06.c +./lib/legacy/zstd_v06.h +./lib/legacy/zstd_v07.c +./lib/legacy/zstd_v07.h + diff --git a/vendor/github.com/DataDog/zstd/xxhash.c b/vendor/github.com/DataDog/zstd/xxhash.c new file mode 100644 index 00000000000..9d9c0e963cb --- /dev/null +++ b/vendor/github.com/DataDog/zstd/xxhash.c @@ -0,0 +1,875 @@ +/* +* xxHash - Fast Hash algorithm +* Copyright (C) 2012-2016, Yann Collet +* +* BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) +* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions are +* met: +* +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. +* * Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following disclaimer +* in the documentation and/or other materials provided with the +* distribution. +* +* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +* A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +* +* You can contact the author at : +* - xxHash homepage: http://www.xxhash.com +* - xxHash source repository : https://github.com/Cyan4973/xxHash +*/ + + +/* ************************************* +* Tuning parameters +***************************************/ +/*!XXH_FORCE_MEMORY_ACCESS : + * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable. + * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal. + * The below switch allow to select different access method for improved performance. + * Method 0 (default) : use `memcpy()`. Safe and portable. + * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable). + * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`. + * Method 2 : direct access. This method doesn't depend on compiler but violate C standard. + * It can generate buggy code on targets which do not support unaligned memory accesses. + * But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6) + * See http://stackoverflow.com/a/32095106/646947 for details. 
+ * Prefer these methods in priority order (0 > 1 > 2) + */ +#ifndef XXH_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */ +# if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) ) +# define XXH_FORCE_MEMORY_ACCESS 2 +# elif (defined(__INTEL_COMPILER) && !defined(WIN32)) || \ + (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) )) +# define XXH_FORCE_MEMORY_ACCESS 1 +# endif +#endif + +/*!XXH_ACCEPT_NULL_INPUT_POINTER : + * If the input pointer is a null pointer, xxHash default behavior is to trigger a memory access error, since it is a bad pointer. + * When this option is enabled, xxHash output for null input pointers will be the same as a null-length input. + * By default, this option is disabled. To enable it, uncomment below define : + */ +/* #define XXH_ACCEPT_NULL_INPUT_POINTER 1 */ + +/*!XXH_FORCE_NATIVE_FORMAT : + * By default, xxHash library provides endian-independant Hash values, based on little-endian convention. + * Results are therefore identical for little-endian and big-endian CPU. + * This comes at a performance cost for big-endian CPU, since some swapping is required to emulate little-endian format. + * Should endian-independance be of no importance for your application, you may set the #define below to 1, + * to improve speed for Big-endian CPU. + * This option has no impact on Little_Endian CPU. + */ +#ifndef XXH_FORCE_NATIVE_FORMAT /* can be defined externally */ +# define XXH_FORCE_NATIVE_FORMAT 0 +#endif + +/*!XXH_FORCE_ALIGN_CHECK : + * This is a minor performance trick, only useful with lots of very small keys. + * It means : check for aligned/unaligned input. 
+ * The check costs one initial branch per hash; set to 0 when the input data + * is guaranteed to be aligned. + */ +#ifndef XXH_FORCE_ALIGN_CHECK /* can be defined externally */ +# if defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64) +# define XXH_FORCE_ALIGN_CHECK 0 +# else +# define XXH_FORCE_ALIGN_CHECK 1 +# endif +#endif + + +/* ************************************* +* Includes & Memory related functions +***************************************/ +/* Modify the local functions below should you wish to use some other memory routines */ +/* for malloc(), free() */ +#include +static void* XXH_malloc(size_t s) { return malloc(s); } +static void XXH_free (void* p) { free(p); } +/* for memcpy() */ +#include +static void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcpy(dest,src,size); } + +#ifndef XXH_STATIC_LINKING_ONLY +# define XXH_STATIC_LINKING_ONLY +#endif +#include "xxhash.h" + + +/* ************************************* +* Compiler Specific Options +***************************************/ +#if defined (__GNUC__) || defined(__cplusplus) || defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */ +# define INLINE_KEYWORD inline +#else +# define INLINE_KEYWORD +#endif + +#if defined(__GNUC__) +# define FORCE_INLINE_ATTR __attribute__((always_inline)) +#elif defined(_MSC_VER) +# define FORCE_INLINE_ATTR __forceinline +#else +# define FORCE_INLINE_ATTR +#endif + +#define FORCE_INLINE_TEMPLATE static INLINE_KEYWORD FORCE_INLINE_ATTR + + +#ifdef _MSC_VER +# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ +#endif + + +/* ************************************* +* Basic Types +***************************************/ +#ifndef MEM_MODULE +# define MEM_MODULE +# if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) +# include + typedef uint8_t BYTE; + typedef uint16_t U16; + typedef uint32_t U32; + typedef 
int32_t S32; + typedef uint64_t U64; +# else + typedef unsigned char BYTE; + typedef unsigned short U16; + typedef unsigned int U32; + typedef signed int S32; + typedef unsigned long long U64; /* if your compiler doesn't support unsigned long long, replace by another 64-bit type here. Note that xxhash.h will also need to be updated. */ +# endif +#endif + + +#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2)) + +/* Force direct memory access. Only works on CPU which support unaligned memory access in hardware */ +static U32 XXH_read32(const void* memPtr) { return *(const U32*) memPtr; } +static U64 XXH_read64(const void* memPtr) { return *(const U64*) memPtr; } + +#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1)) + +/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ +/* currently only defined for gcc and icc */ +typedef union { U32 u32; U64 u64; } __attribute__((packed)) unalign; + +static U32 XXH_read32(const void* ptr) { return ((const unalign*)ptr)->u32; } +static U64 XXH_read64(const void* ptr) { return ((const unalign*)ptr)->u64; } + +#else + +/* portable and safe solution. Generally efficient. 
+ * see : http://stackoverflow.com/a/32095106/646947 + */ + +static U32 XXH_read32(const void* memPtr) +{ + U32 val; + memcpy(&val, memPtr, sizeof(val)); + return val; +} + +static U64 XXH_read64(const void* memPtr) +{ + U64 val; + memcpy(&val, memPtr, sizeof(val)); + return val; +} + +#endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */ + + +/* **************************************** +* Compiler-specific Functions and Macros +******************************************/ +#define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__) + +/* Note : although _rotl exists for minGW (GCC under windows), performance seems poor */ +#if defined(_MSC_VER) +# define XXH_rotl32(x,r) _rotl(x,r) +# define XXH_rotl64(x,r) _rotl64(x,r) +#else +# define XXH_rotl32(x,r) ((x << r) | (x >> (32 - r))) +# define XXH_rotl64(x,r) ((x << r) | (x >> (64 - r))) +#endif + +#if defined(_MSC_VER) /* Visual Studio */ +# define XXH_swap32 _byteswap_ulong +# define XXH_swap64 _byteswap_uint64 +#elif GCC_VERSION >= 403 +# define XXH_swap32 __builtin_bswap32 +# define XXH_swap64 __builtin_bswap64 +#else +static U32 XXH_swap32 (U32 x) +{ + return ((x << 24) & 0xff000000 ) | + ((x << 8) & 0x00ff0000 ) | + ((x >> 8) & 0x0000ff00 ) | + ((x >> 24) & 0x000000ff ); +} +static U64 XXH_swap64 (U64 x) +{ + return ((x << 56) & 0xff00000000000000ULL) | + ((x << 40) & 0x00ff000000000000ULL) | + ((x << 24) & 0x0000ff0000000000ULL) | + ((x << 8) & 0x000000ff00000000ULL) | + ((x >> 8) & 0x00000000ff000000ULL) | + ((x >> 24) & 0x0000000000ff0000ULL) | + ((x >> 40) & 0x000000000000ff00ULL) | + ((x >> 56) & 0x00000000000000ffULL); +} +#endif + + +/* ************************************* +* Architecture Macros +***************************************/ +typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess; + +/* XXH_CPU_LITTLE_ENDIAN can be defined externally, for example on the compiler command line */ +#ifndef XXH_CPU_LITTLE_ENDIAN + static const int g_one = 1; +# define XXH_CPU_LITTLE_ENDIAN (*(const char*)(&g_one)) 
+#endif + + +/* *************************** +* Memory reads +*****************************/ +typedef enum { XXH_aligned, XXH_unaligned } XXH_alignment; + +FORCE_INLINE_TEMPLATE U32 XXH_readLE32_align(const void* ptr, XXH_endianess endian, XXH_alignment align) +{ + if (align==XXH_unaligned) + return endian==XXH_littleEndian ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr)); + else + return endian==XXH_littleEndian ? *(const U32*)ptr : XXH_swap32(*(const U32*)ptr); +} + +FORCE_INLINE_TEMPLATE U32 XXH_readLE32(const void* ptr, XXH_endianess endian) +{ + return XXH_readLE32_align(ptr, endian, XXH_unaligned); +} + +static U32 XXH_readBE32(const void* ptr) +{ + return XXH_CPU_LITTLE_ENDIAN ? XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr); +} + +FORCE_INLINE_TEMPLATE U64 XXH_readLE64_align(const void* ptr, XXH_endianess endian, XXH_alignment align) +{ + if (align==XXH_unaligned) + return endian==XXH_littleEndian ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr)); + else + return endian==XXH_littleEndian ? *(const U64*)ptr : XXH_swap64(*(const U64*)ptr); +} + +FORCE_INLINE_TEMPLATE U64 XXH_readLE64(const void* ptr, XXH_endianess endian) +{ + return XXH_readLE64_align(ptr, endian, XXH_unaligned); +} + +static U64 XXH_readBE64(const void* ptr) +{ + return XXH_CPU_LITTLE_ENDIAN ? 
XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr); +} + + +/* ************************************* +* Macros +***************************************/ +#define XXH_STATIC_ASSERT(c) { enum { XXH_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */ + + +/* ************************************* +* Constants +***************************************/ +static const U32 PRIME32_1 = 2654435761U; +static const U32 PRIME32_2 = 2246822519U; +static const U32 PRIME32_3 = 3266489917U; +static const U32 PRIME32_4 = 668265263U; +static const U32 PRIME32_5 = 374761393U; + +static const U64 PRIME64_1 = 11400714785074694791ULL; +static const U64 PRIME64_2 = 14029467366897019727ULL; +static const U64 PRIME64_3 = 1609587929392839161ULL; +static const U64 PRIME64_4 = 9650029242287828579ULL; +static const U64 PRIME64_5 = 2870177450012600261ULL; + +XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; } + + +/* ************************** +* Utils +****************************/ +XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* restrict dstState, const XXH32_state_t* restrict srcState) +{ + memcpy(dstState, srcState, sizeof(*dstState)); +} + +XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* restrict dstState, const XXH64_state_t* restrict srcState) +{ + memcpy(dstState, srcState, sizeof(*dstState)); +} + + +/* *************************** +* Simple Hash Functions +*****************************/ + +static U32 XXH32_round(U32 seed, U32 input) +{ + seed += input * PRIME32_2; + seed = XXH_rotl32(seed, 13); + seed *= PRIME32_1; + return seed; +} + +FORCE_INLINE_TEMPLATE U32 XXH32_endian_align(const void* input, size_t len, U32 seed, XXH_endianess endian, XXH_alignment align) +{ + const BYTE* p = (const BYTE*)input; + const BYTE* bEnd = p + len; + U32 h32; +#define XXH_get32bits(p) XXH_readLE32_align(p, endian, align) + +#ifdef XXH_ACCEPT_NULL_INPUT_POINTER + if (p==NULL) { + len=0; + bEnd=p=(const BYTE*)(size_t)16; + } +#endif + + if (len>=16) 
{ + const BYTE* const limit = bEnd - 16; + U32 v1 = seed + PRIME32_1 + PRIME32_2; + U32 v2 = seed + PRIME32_2; + U32 v3 = seed + 0; + U32 v4 = seed - PRIME32_1; + + do { + v1 = XXH32_round(v1, XXH_get32bits(p)); p+=4; + v2 = XXH32_round(v2, XXH_get32bits(p)); p+=4; + v3 = XXH32_round(v3, XXH_get32bits(p)); p+=4; + v4 = XXH32_round(v4, XXH_get32bits(p)); p+=4; + } while (p<=limit); + + h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7) + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18); + } else { + h32 = seed + PRIME32_5; + } + + h32 += (U32) len; + + while (p+4<=bEnd) { + h32 += XXH_get32bits(p) * PRIME32_3; + h32 = XXH_rotl32(h32, 17) * PRIME32_4 ; + p+=4; + } + + while (p> 15; + h32 *= PRIME32_2; + h32 ^= h32 >> 13; + h32 *= PRIME32_3; + h32 ^= h32 >> 16; + + return h32; +} + + +XXH_PUBLIC_API unsigned int XXH32 (const void* input, size_t len, unsigned int seed) +{ +#if 0 + /* Simple version, good for code maintenance, but unfortunately slow for small inputs */ + XXH32_CREATESTATE_STATIC(state); + XXH32_reset(state, seed); + XXH32_update(state, input, len); + return XXH32_digest(state); +#else + XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; + + if (XXH_FORCE_ALIGN_CHECK) { + if ((((size_t)input) & 3) == 0) { /* Input is 4-bytes aligned, leverage the speed benefit */ + if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) + return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned); + else + return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned); + } } + + if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) + return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned); + else + return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned); +#endif +} + + +static U64 XXH64_round(U64 acc, U64 input) +{ + acc += input * PRIME64_2; + acc = XXH_rotl64(acc, 31); + acc *= PRIME64_1; + return acc; +} + +static U64 XXH64_mergeRound(U64 acc, U64 val) +{ + val = 
XXH64_round(0, val); + acc ^= val; + acc = acc * PRIME64_1 + PRIME64_4; + return acc; +} + +FORCE_INLINE_TEMPLATE U64 XXH64_endian_align(const void* input, size_t len, U64 seed, XXH_endianess endian, XXH_alignment align) +{ + const BYTE* p = (const BYTE*)input; + const BYTE* const bEnd = p + len; + U64 h64; +#define XXH_get64bits(p) XXH_readLE64_align(p, endian, align) + +#ifdef XXH_ACCEPT_NULL_INPUT_POINTER + if (p==NULL) { + len=0; + bEnd=p=(const BYTE*)(size_t)32; + } +#endif + + if (len>=32) { + const BYTE* const limit = bEnd - 32; + U64 v1 = seed + PRIME64_1 + PRIME64_2; + U64 v2 = seed + PRIME64_2; + U64 v3 = seed + 0; + U64 v4 = seed - PRIME64_1; + + do { + v1 = XXH64_round(v1, XXH_get64bits(p)); p+=8; + v2 = XXH64_round(v2, XXH_get64bits(p)); p+=8; + v3 = XXH64_round(v3, XXH_get64bits(p)); p+=8; + v4 = XXH64_round(v4, XXH_get64bits(p)); p+=8; + } while (p<=limit); + + h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18); + h64 = XXH64_mergeRound(h64, v1); + h64 = XXH64_mergeRound(h64, v2); + h64 = XXH64_mergeRound(h64, v3); + h64 = XXH64_mergeRound(h64, v4); + + } else { + h64 = seed + PRIME64_5; + } + + h64 += (U64) len; + + while (p+8<=bEnd) { + U64 const k1 = XXH64_round(0, XXH_get64bits(p)); + h64 ^= k1; + h64 = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4; + p+=8; + } + + if (p+4<=bEnd) { + h64 ^= (U64)(XXH_get32bits(p)) * PRIME64_1; + h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3; + p+=4; + } + + while (p> 33; + h64 *= PRIME64_2; + h64 ^= h64 >> 29; + h64 *= PRIME64_3; + h64 ^= h64 >> 32; + + return h64; +} + + +XXH_PUBLIC_API unsigned long long XXH64 (const void* input, size_t len, unsigned long long seed) +{ +#if 0 + /* Simple version, good for code maintenance, but unfortunately slow for small inputs */ + XXH64_CREATESTATE_STATIC(state); + XXH64_reset(state, seed); + XXH64_update(state, input, len); + return XXH64_digest(state); +#else + XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; + + if 
(XXH_FORCE_ALIGN_CHECK) { + if ((((size_t)input) & 7)==0) { /* Input is aligned, let's leverage the speed advantage */ + if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) + return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned); + else + return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned); + } } + + if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) + return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned); + else + return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned); +#endif +} + + +/* ************************************************** +* Advanced Hash Functions +****************************************************/ + +XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void) +{ + return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t)); +} +XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr) +{ + XXH_free(statePtr); + return XXH_OK; +} + +XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void) +{ + return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t)); +} +XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr) +{ + XXH_free(statePtr); + return XXH_OK; +} + + +/*** Hash feed ***/ + +XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, unsigned int seed) +{ + XXH32_state_t state; /* using a local state to memcpy() in order to avoid strict-aliasing warnings */ + memset(&state, 0, sizeof(state)-4); /* do not write into reserved, for future removal */ + state.v1 = seed + PRIME32_1 + PRIME32_2; + state.v2 = seed + PRIME32_2; + state.v3 = seed + 0; + state.v4 = seed - PRIME32_1; + memcpy(statePtr, &state, sizeof(state)); + return XXH_OK; +} + + +XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, unsigned long long seed) +{ + XXH64_state_t state; /* using a local state to memcpy() in order to avoid strict-aliasing warnings */ + memset(&state, 0, sizeof(state)-8); /* do not write into reserved, for 
future removal */ + state.v1 = seed + PRIME64_1 + PRIME64_2; + state.v2 = seed + PRIME64_2; + state.v3 = seed + 0; + state.v4 = seed - PRIME64_1; + memcpy(statePtr, &state, sizeof(state)); + return XXH_OK; +} + + +FORCE_INLINE_TEMPLATE XXH_errorcode XXH32_update_endian (XXH32_state_t* state, const void* input, size_t len, XXH_endianess endian) +{ + const BYTE* p = (const BYTE*)input; + const BYTE* const bEnd = p + len; + +#ifdef XXH_ACCEPT_NULL_INPUT_POINTER + if (input==NULL) return XXH_ERROR; +#endif + + state->total_len_32 += (unsigned)len; + state->large_len |= (len>=16) | (state->total_len_32>=16); + + if (state->memsize + len < 16) { /* fill in tmp buffer */ + XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, len); + state->memsize += (unsigned)len; + return XXH_OK; + } + + if (state->memsize) { /* some data left from previous update */ + XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, 16-state->memsize); + { const U32* p32 = state->mem32; + state->v1 = XXH32_round(state->v1, XXH_readLE32(p32, endian)); p32++; + state->v2 = XXH32_round(state->v2, XXH_readLE32(p32, endian)); p32++; + state->v3 = XXH32_round(state->v3, XXH_readLE32(p32, endian)); p32++; + state->v4 = XXH32_round(state->v4, XXH_readLE32(p32, endian)); p32++; + } + p += 16-state->memsize; + state->memsize = 0; + } + + if (p <= bEnd-16) { + const BYTE* const limit = bEnd - 16; + U32 v1 = state->v1; + U32 v2 = state->v2; + U32 v3 = state->v3; + U32 v4 = state->v4; + + do { + v1 = XXH32_round(v1, XXH_readLE32(p, endian)); p+=4; + v2 = XXH32_round(v2, XXH_readLE32(p, endian)); p+=4; + v3 = XXH32_round(v3, XXH_readLE32(p, endian)); p+=4; + v4 = XXH32_round(v4, XXH_readLE32(p, endian)); p+=4; + } while (p<=limit); + + state->v1 = v1; + state->v2 = v2; + state->v3 = v3; + state->v4 = v4; + } + + if (p < bEnd) { + XXH_memcpy(state->mem32, p, (size_t)(bEnd-p)); + state->memsize = (unsigned)(bEnd-p); + } + + return XXH_OK; +} + +XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* 
state_in, const void* input, size_t len) +{ + XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; + + if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) + return XXH32_update_endian(state_in, input, len, XXH_littleEndian); + else + return XXH32_update_endian(state_in, input, len, XXH_bigEndian); +} + + + +FORCE_INLINE_TEMPLATE U32 XXH32_digest_endian (const XXH32_state_t* state, XXH_endianess endian) +{ + const BYTE * p = (const BYTE*)state->mem32; + const BYTE* const bEnd = (const BYTE*)(state->mem32) + state->memsize; + U32 h32; + + if (state->large_len) { + h32 = XXH_rotl32(state->v1, 1) + XXH_rotl32(state->v2, 7) + XXH_rotl32(state->v3, 12) + XXH_rotl32(state->v4, 18); + } else { + h32 = state->v3 /* == seed */ + PRIME32_5; + } + + h32 += state->total_len_32; + + while (p+4<=bEnd) { + h32 += XXH_readLE32(p, endian) * PRIME32_3; + h32 = XXH_rotl32(h32, 17) * PRIME32_4; + p+=4; + } + + while (p> 15; + h32 *= PRIME32_2; + h32 ^= h32 >> 13; + h32 *= PRIME32_3; + h32 ^= h32 >> 16; + + return h32; +} + + +XXH_PUBLIC_API unsigned int XXH32_digest (const XXH32_state_t* state_in) +{ + XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; + + if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) + return XXH32_digest_endian(state_in, XXH_littleEndian); + else + return XXH32_digest_endian(state_in, XXH_bigEndian); +} + + + +/* **** XXH64 **** */ + +FORCE_INLINE_TEMPLATE XXH_errorcode XXH64_update_endian (XXH64_state_t* state, const void* input, size_t len, XXH_endianess endian) +{ + const BYTE* p = (const BYTE*)input; + const BYTE* const bEnd = p + len; + +#ifdef XXH_ACCEPT_NULL_INPUT_POINTER + if (input==NULL) return XXH_ERROR; +#endif + + state->total_len += len; + + if (state->memsize + len < 32) { /* fill in tmp buffer */ + XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, len); + state->memsize += (U32)len; + return XXH_OK; + } + + if (state->memsize) { /* tmp buffer is full */ + 
XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, 32-state->memsize); + state->v1 = XXH64_round(state->v1, XXH_readLE64(state->mem64+0, endian)); + state->v2 = XXH64_round(state->v2, XXH_readLE64(state->mem64+1, endian)); + state->v3 = XXH64_round(state->v3, XXH_readLE64(state->mem64+2, endian)); + state->v4 = XXH64_round(state->v4, XXH_readLE64(state->mem64+3, endian)); + p += 32-state->memsize; + state->memsize = 0; + } + + if (p+32 <= bEnd) { + const BYTE* const limit = bEnd - 32; + U64 v1 = state->v1; + U64 v2 = state->v2; + U64 v3 = state->v3; + U64 v4 = state->v4; + + do { + v1 = XXH64_round(v1, XXH_readLE64(p, endian)); p+=8; + v2 = XXH64_round(v2, XXH_readLE64(p, endian)); p+=8; + v3 = XXH64_round(v3, XXH_readLE64(p, endian)); p+=8; + v4 = XXH64_round(v4, XXH_readLE64(p, endian)); p+=8; + } while (p<=limit); + + state->v1 = v1; + state->v2 = v2; + state->v3 = v3; + state->v4 = v4; + } + + if (p < bEnd) { + XXH_memcpy(state->mem64, p, (size_t)(bEnd-p)); + state->memsize = (unsigned)(bEnd-p); + } + + return XXH_OK; +} + +XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* state_in, const void* input, size_t len) +{ + XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; + + if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) + return XXH64_update_endian(state_in, input, len, XXH_littleEndian); + else + return XXH64_update_endian(state_in, input, len, XXH_bigEndian); +} + + + +FORCE_INLINE_TEMPLATE U64 XXH64_digest_endian (const XXH64_state_t* state, XXH_endianess endian) +{ + const BYTE * p = (const BYTE*)state->mem64; + const BYTE* const bEnd = (const BYTE*)state->mem64 + state->memsize; + U64 h64; + + if (state->total_len >= 32) { + U64 const v1 = state->v1; + U64 const v2 = state->v2; + U64 const v3 = state->v3; + U64 const v4 = state->v4; + + h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18); + h64 = XXH64_mergeRound(h64, v1); + h64 = XXH64_mergeRound(h64, v2); + h64 = 
XXH64_mergeRound(h64, v3); + h64 = XXH64_mergeRound(h64, v4); + } else { + h64 = state->v3 + PRIME64_5; + } + + h64 += (U64) state->total_len; + + while (p+8<=bEnd) { + U64 const k1 = XXH64_round(0, XXH_readLE64(p, endian)); + h64 ^= k1; + h64 = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4; + p+=8; + } + + if (p+4<=bEnd) { + h64 ^= (U64)(XXH_readLE32(p, endian)) * PRIME64_1; + h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3; + p+=4; + } + + while (p> 33; + h64 *= PRIME64_2; + h64 ^= h64 >> 29; + h64 *= PRIME64_3; + h64 ^= h64 >> 32; + + return h64; +} + + +XXH_PUBLIC_API unsigned long long XXH64_digest (const XXH64_state_t* state_in) +{ + XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; + + if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) + return XXH64_digest_endian(state_in, XXH_littleEndian); + else + return XXH64_digest_endian(state_in, XXH_bigEndian); +} + + +/* ************************** +* Canonical representation +****************************/ + +/*! Default XXH result types are basic unsigned 32 and 64 bits. +* The canonical representation follows human-readable write convention, aka big-endian (large digits first). +* These functions allow transformation of hash result into and from its canonical format. +* This way, hash values can be written into a file or buffer, and remain comparable across different systems and programs. 
+*/ + +XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash) +{ + XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t)); + if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash); + memcpy(dst, &hash, sizeof(*dst)); +} + +XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash) +{ + XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t)); + if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash); + memcpy(dst, &hash, sizeof(*dst)); +} + +XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src) +{ + return XXH_readBE32(src); +} + +XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src) +{ + return XXH_readBE64(src); +} diff --git a/vendor/github.com/DataDog/zstd/xxhash.h b/vendor/github.com/DataDog/zstd/xxhash.h new file mode 100644 index 00000000000..9bad1f59f63 --- /dev/null +++ b/vendor/github.com/DataDog/zstd/xxhash.h @@ -0,0 +1,305 @@ +/* + xxHash - Extremely Fast Hash algorithm + Header File + Copyright (C) 2012-2016, Yann Collet. + + BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + You can contact the author at : + - xxHash source repository : https://github.com/Cyan4973/xxHash +*/ + +/* Notice extracted from xxHash homepage : + +xxHash is an extremely fast Hash algorithm, running at RAM speed limits. +It also successfully passes all tests from the SMHasher suite. + +Comparison (single thread, Windows Seven 32 bits, using SMHasher on a Core 2 Duo @3GHz) + +Name Speed Q.Score Author +xxHash 5.4 GB/s 10 +CrapWow 3.2 GB/s 2 Andrew +MumurHash 3a 2.7 GB/s 10 Austin Appleby +SpookyHash 2.0 GB/s 10 Bob Jenkins +SBox 1.4 GB/s 9 Bret Mulvey +Lookup3 1.2 GB/s 9 Bob Jenkins +SuperFastHash 1.2 GB/s 1 Paul Hsieh +CityHash64 1.05 GB/s 10 Pike & Alakuijala +FNV 0.55 GB/s 5 Fowler, Noll, Vo +CRC32 0.43 GB/s 9 +MD5-32 0.33 GB/s 10 Ronald L. Rivest +SHA1-32 0.28 GB/s 10 + +Q.Score is a measure of quality of the hash function. +It depends on successfully passing SMHasher test set. +10 is a perfect score. + +A 64-bits version, named XXH64, is available since r35. +It offers much better speed, but for 64-bits applications only. 
+Name Speed on 64 bits Speed on 32 bits +XXH64 13.8 GB/s 1.9 GB/s +XXH32 6.8 GB/s 6.0 GB/s +*/ + +#if defined (__cplusplus) +extern "C" { +#endif + +#ifndef XXHASH_H_5627135585666179 +#define XXHASH_H_5627135585666179 1 + + +/* **************************** +* Definitions +******************************/ +#include /* size_t */ +typedef enum { XXH_OK=0, XXH_ERROR } XXH_errorcode; + + +/* **************************** +* API modifier +******************************/ +/** XXH_PRIVATE_API +* This is useful if you want to include xxhash functions in `static` mode +* in order to inline them, and remove their symbol from the public list. +* Methodology : +* #define XXH_PRIVATE_API +* #include "xxhash.h" +* `xxhash.c` is automatically included. +* It's not useful to compile and link it as a separate module anymore. +*/ +#ifdef XXH_PRIVATE_API +# ifndef XXH_STATIC_LINKING_ONLY +# define XXH_STATIC_LINKING_ONLY +# endif +# if defined(__GNUC__) +# define XXH_PUBLIC_API static __inline __attribute__((unused)) +# elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) +# define XXH_PUBLIC_API static inline +# elif defined(_MSC_VER) +# define XXH_PUBLIC_API static __inline +# else +# define XXH_PUBLIC_API static /* this version may generate warnings for unused static functions; disable the relevant warning */ +# endif +#else +# define XXH_PUBLIC_API /* do nothing */ +#endif /* XXH_PRIVATE_API */ + +/*!XXH_NAMESPACE, aka Namespace Emulation : + +If you want to include _and expose_ xxHash functions from within your own library, +but also want to avoid symbol collisions with another library which also includes xxHash, + +you can use XXH_NAMESPACE, to automatically prefix any public symbol from xxhash library +with the value of XXH_NAMESPACE (so avoid to keep it NULL and avoid numeric values). 
+ +Note that no change is required within the calling program as long as it includes `xxhash.h` : +regular symbol name will be automatically translated by this header. +*/ +#ifdef XXH_NAMESPACE +# define XXH_CAT(A,B) A##B +# define XXH_NAME2(A,B) XXH_CAT(A,B) +# define XXH32 XXH_NAME2(XXH_NAMESPACE, XXH32) +# define XXH64 XXH_NAME2(XXH_NAMESPACE, XXH64) +# define XXH_versionNumber XXH_NAME2(XXH_NAMESPACE, XXH_versionNumber) +# define XXH32_createState XXH_NAME2(XXH_NAMESPACE, XXH32_createState) +# define XXH64_createState XXH_NAME2(XXH_NAMESPACE, XXH64_createState) +# define XXH32_freeState XXH_NAME2(XXH_NAMESPACE, XXH32_freeState) +# define XXH64_freeState XXH_NAME2(XXH_NAMESPACE, XXH64_freeState) +# define XXH32_reset XXH_NAME2(XXH_NAMESPACE, XXH32_reset) +# define XXH64_reset XXH_NAME2(XXH_NAMESPACE, XXH64_reset) +# define XXH32_update XXH_NAME2(XXH_NAMESPACE, XXH32_update) +# define XXH64_update XXH_NAME2(XXH_NAMESPACE, XXH64_update) +# define XXH32_digest XXH_NAME2(XXH_NAMESPACE, XXH32_digest) +# define XXH64_digest XXH_NAME2(XXH_NAMESPACE, XXH64_digest) +# define XXH32_copyState XXH_NAME2(XXH_NAMESPACE, XXH32_copyState) +# define XXH64_copyState XXH_NAME2(XXH_NAMESPACE, XXH64_copyState) +# define XXH32_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH32_canonicalFromHash) +# define XXH64_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH64_canonicalFromHash) +# define XXH32_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH32_hashFromCanonical) +# define XXH64_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH64_hashFromCanonical) +#endif + + +/* ************************************* +* Version +***************************************/ +#define XXH_VERSION_MAJOR 0 +#define XXH_VERSION_MINOR 6 +#define XXH_VERSION_RELEASE 2 +#define XXH_VERSION_NUMBER (XXH_VERSION_MAJOR *100*100 + XXH_VERSION_MINOR *100 + XXH_VERSION_RELEASE) +XXH_PUBLIC_API unsigned XXH_versionNumber (void); + + +/* **************************** +* Simple Hash Functions +******************************/ 
+typedef unsigned int XXH32_hash_t; +typedef unsigned long long XXH64_hash_t; + +XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t length, unsigned int seed); +XXH_PUBLIC_API XXH64_hash_t XXH64 (const void* input, size_t length, unsigned long long seed); + +/*! +XXH32() : + Calculate the 32-bits hash of sequence "length" bytes stored at memory address "input". + The memory between input & input+length must be valid (allocated and read-accessible). + "seed" can be used to alter the result predictably. + Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark) : 5.4 GB/s +XXH64() : + Calculate the 64-bits hash of sequence of length "len" stored at memory address "input". + "seed" can be used to alter the result predictably. + This function runs 2x faster on 64-bits systems, but slower on 32-bits systems (see benchmark). +*/ + + +/* **************************** +* Streaming Hash Functions +******************************/ +typedef struct XXH32_state_s XXH32_state_t; /* incomplete type */ +typedef struct XXH64_state_s XXH64_state_t; /* incomplete type */ + +/*! 
State allocation, compatible with dynamic libraries */ + +XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void); +XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr); + +XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void); +XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr); + + +/* hash streaming */ + +XXH_PUBLIC_API XXH_errorcode XXH32_reset (XXH32_state_t* statePtr, unsigned int seed); +XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* statePtr, const void* input, size_t length); +XXH_PUBLIC_API XXH32_hash_t XXH32_digest (const XXH32_state_t* statePtr); + +XXH_PUBLIC_API XXH_errorcode XXH64_reset (XXH64_state_t* statePtr, unsigned long long seed); +XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* statePtr, const void* input, size_t length); +XXH_PUBLIC_API XXH64_hash_t XXH64_digest (const XXH64_state_t* statePtr); + +/* +These functions generate the xxHash of an input provided in multiple segments. +Note that, for small input, they are slower than single-call functions, due to state management. +For small input, prefer `XXH32()` and `XXH64()` . + +XXH state must first be allocated, using XXH*_createState() . + +Start a new hash by initializing state with a seed, using XXH*_reset(). + +Then, feed the hash state by calling XXH*_update() as many times as necessary. +Obviously, input must be allocated and read accessible. +The function returns an error code, with 0 meaning OK, and any other value meaning there is an error. + +Finally, a hash value can be produced anytime, by using XXH*_digest(). +This function returns the nn-bits hash as an int or long long. + +It's still possible to continue inserting input into the hash state after a digest, +and generate some new hashes later on, by calling again XXH*_digest(). + +When done, free XXH state space if it was allocated dynamically. 
+*/ + + +/* ************************** +* Utils +****************************/ +#if !(defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)) /* ! C99 */ +# define restrict /* disable restrict */ +#endif + +XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* restrict dst_state, const XXH32_state_t* restrict src_state); +XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* restrict dst_state, const XXH64_state_t* restrict src_state); + + +/* ************************** +* Canonical representation +****************************/ +/* Default result type for XXH functions are primitive unsigned 32 and 64 bits. +* The canonical representation uses human-readable write convention, aka big-endian (large digits first). +* These functions allow transformation of hash result into and from its canonical format. +* This way, hash values can be written into a file / memory, and remain comparable on different systems and programs. +*/ +typedef struct { unsigned char digest[4]; } XXH32_canonical_t; +typedef struct { unsigned char digest[8]; } XXH64_canonical_t; + +XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash); +XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash); + +XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src); +XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src); + +#endif /* XXHASH_H_5627135585666179 */ + + + +/* ================================================================================================ + This section contains definitions which are not guaranteed to remain stable. + They may change in future versions, becoming incompatible with a different version of the library. + They shall only be used with static linking. + Never use these definitions in association with dynamic linking ! 
+=================================================================================================== */ +#if defined(XXH_STATIC_LINKING_ONLY) && !defined(XXH_STATIC_H_3543687687345) +#define XXH_STATIC_H_3543687687345 + +/* These definitions are only meant to allow allocation of XXH state + statically, on stack, or in a struct for example. + Do not use members directly. */ + + struct XXH32_state_s { + unsigned total_len_32; + unsigned large_len; + unsigned v1; + unsigned v2; + unsigned v3; + unsigned v4; + unsigned mem32[4]; /* buffer defined as U32 for alignment */ + unsigned memsize; + unsigned reserved; /* never read nor write, will be removed in a future version */ + }; /* typedef'd to XXH32_state_t */ + + struct XXH64_state_s { + unsigned long long total_len; + unsigned long long v1; + unsigned long long v2; + unsigned long long v3; + unsigned long long v4; + unsigned long long mem64[4]; /* buffer defined as U64 for alignment */ + unsigned memsize; + unsigned reserved[2]; /* never read nor write, will be removed in a future version */ + }; /* typedef'd to XXH64_state_t */ + + +# ifdef XXH_PRIVATE_API +# include "xxhash.c" /* include xxhash functions as `static`, for inlining */ +# endif + +#endif /* XXH_STATIC_LINKING_ONLY && XXH_STATIC_H_3543687687345 */ + + +#if defined (__cplusplus) +} +#endif diff --git a/vendor/github.com/DataDog/zstd/zbuff.h b/vendor/github.com/DataDog/zstd/zbuff.h new file mode 100644 index 00000000000..a93115da4a1 --- /dev/null +++ b/vendor/github.com/DataDog/zstd/zbuff.h @@ -0,0 +1,213 @@ +/* + * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. 
+ */ + +/* *************************************************************** +* NOTES/WARNINGS +******************************************************************/ +/* The streaming API defined here is deprecated. + * Consider migrating towards ZSTD_compressStream() API in `zstd.h` + * See 'lib/README.md'. + *****************************************************************/ + + +#if defined (__cplusplus) +extern "C" { +#endif + +#ifndef ZSTD_BUFFERED_H_23987 +#define ZSTD_BUFFERED_H_23987 + +/* ************************************* +* Dependencies +***************************************/ +#include /* size_t */ +#include "zstd.h" /* ZSTD_CStream, ZSTD_DStream, ZSTDLIB_API */ + + +/* *************************************************************** +* Compiler specifics +*****************************************************************/ +/* Deprecation warnings */ +/* Should these warnings be a problem, + it is generally possible to disable them, + typically with -Wno-deprecated-declarations for gcc + or _CRT_SECURE_NO_WARNINGS in Visual. 
+ Otherwise, it's also possible to define ZBUFF_DISABLE_DEPRECATE_WARNINGS */ +#ifdef ZBUFF_DISABLE_DEPRECATE_WARNINGS +# define ZBUFF_DEPRECATED(message) ZSTDLIB_API /* disable deprecation warnings */ +#else +# if defined (__cplusplus) && (__cplusplus >= 201402) /* C++14 or greater */ +# define ZBUFF_DEPRECATED(message) [[deprecated(message)]] ZSTDLIB_API +# elif (defined(__GNUC__) && (__GNUC__ >= 5)) || defined(__clang__) +# define ZBUFF_DEPRECATED(message) ZSTDLIB_API __attribute__((deprecated(message))) +# elif defined(__GNUC__) && (__GNUC__ >= 3) +# define ZBUFF_DEPRECATED(message) ZSTDLIB_API __attribute__((deprecated)) +# elif defined(_MSC_VER) +# define ZBUFF_DEPRECATED(message) ZSTDLIB_API __declspec(deprecated(message)) +# else +# pragma message("WARNING: You need to implement ZBUFF_DEPRECATED for this compiler") +# define ZBUFF_DEPRECATED(message) ZSTDLIB_API +# endif +#endif /* ZBUFF_DISABLE_DEPRECATE_WARNINGS */ + + +/* ************************************* +* Streaming functions +***************************************/ +/* This is the easier "buffered" streaming API, +* using an internal buffer to lift all restrictions on user-provided buffers +* which can be any size, any place, for both input and output. 
+* ZBUFF and ZSTD are 100% interoperable, +* frames created by one can be decoded by the other one */ + +typedef ZSTD_CStream ZBUFF_CCtx; +ZBUFF_DEPRECATED("use ZSTD_createCStream") ZBUFF_CCtx* ZBUFF_createCCtx(void); +ZBUFF_DEPRECATED("use ZSTD_freeCStream") size_t ZBUFF_freeCCtx(ZBUFF_CCtx* cctx); + +ZBUFF_DEPRECATED("use ZSTD_initCStream") size_t ZBUFF_compressInit(ZBUFF_CCtx* cctx, int compressionLevel); +ZBUFF_DEPRECATED("use ZSTD_initCStream_usingDict") size_t ZBUFF_compressInitDictionary(ZBUFF_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel); + +ZBUFF_DEPRECATED("use ZSTD_compressStream") size_t ZBUFF_compressContinue(ZBUFF_CCtx* cctx, void* dst, size_t* dstCapacityPtr, const void* src, size_t* srcSizePtr); +ZBUFF_DEPRECATED("use ZSTD_flushStream") size_t ZBUFF_compressFlush(ZBUFF_CCtx* cctx, void* dst, size_t* dstCapacityPtr); +ZBUFF_DEPRECATED("use ZSTD_endStream") size_t ZBUFF_compressEnd(ZBUFF_CCtx* cctx, void* dst, size_t* dstCapacityPtr); + +/*-************************************************* +* Streaming compression - howto +* +* A ZBUFF_CCtx object is required to track streaming operation. +* Use ZBUFF_createCCtx() and ZBUFF_freeCCtx() to create/release resources. +* ZBUFF_CCtx objects can be reused multiple times. +* +* Start by initializing ZBUF_CCtx. +* Use ZBUFF_compressInit() to start a new compression operation. +* Use ZBUFF_compressInitDictionary() for a compression which requires a dictionary. +* +* Use ZBUFF_compressContinue() repetitively to consume input stream. +* *srcSizePtr and *dstCapacityPtr can be any size. +* The function will report how many bytes were read or written within *srcSizePtr and *dstCapacityPtr. +* Note that it may not consume the entire input, in which case it's up to the caller to present again remaining data. +* The content of `dst` will be overwritten (up to *dstCapacityPtr) at each call, so save its content if it matters or change @dst . 
+* @return : a hint to preferred nb of bytes to use as input for next function call (it's just a hint, to improve latency) +* or an error code, which can be tested using ZBUFF_isError(). +* +* At any moment, it's possible to flush whatever data remains within buffer, using ZBUFF_compressFlush(). +* The nb of bytes written into `dst` will be reported into *dstCapacityPtr. +* Note that the function cannot output more than *dstCapacityPtr, +* therefore, some content might still be left into internal buffer if *dstCapacityPtr is too small. +* @return : nb of bytes still present into internal buffer (0 if it's empty) +* or an error code, which can be tested using ZBUFF_isError(). +* +* ZBUFF_compressEnd() instructs to finish a frame. +* It will perform a flush and write frame epilogue. +* The epilogue is required for decoders to consider a frame completed. +* Similar to ZBUFF_compressFlush(), it may not be able to output the entire internal buffer content if *dstCapacityPtr is too small. +* In which case, call again ZBUFF_compressFlush() to complete the flush. +* @return : nb of bytes still present into internal buffer (0 if it's empty) +* or an error code, which can be tested using ZBUFF_isError(). +* +* Hint : _recommended buffer_ sizes (not compulsory) : ZBUFF_recommendedCInSize() / ZBUFF_recommendedCOutSize() +* input : ZBUFF_recommendedCInSize==128 KB block size is the internal unit, use this value to reduce intermediate stages (better latency) +* output : ZBUFF_recommendedCOutSize==ZSTD_compressBound(128 KB) + 3 + 3 : ensures it's always possible to write/flush/end a full block. Skip some buffering. +* By using both, it ensures that input will be entirely consumed, and output will always contain the result, reducing intermediate buffering. 
+* **************************************************/ + + +typedef ZSTD_DStream ZBUFF_DCtx; +ZBUFF_DEPRECATED("use ZSTD_createDStream") ZBUFF_DCtx* ZBUFF_createDCtx(void); +ZBUFF_DEPRECATED("use ZSTD_freeDStream") size_t ZBUFF_freeDCtx(ZBUFF_DCtx* dctx); + +ZBUFF_DEPRECATED("use ZSTD_initDStream") size_t ZBUFF_decompressInit(ZBUFF_DCtx* dctx); +ZBUFF_DEPRECATED("use ZSTD_initDStream_usingDict") size_t ZBUFF_decompressInitDictionary(ZBUFF_DCtx* dctx, const void* dict, size_t dictSize); + +ZBUFF_DEPRECATED("use ZSTD_decompressStream") size_t ZBUFF_decompressContinue(ZBUFF_DCtx* dctx, + void* dst, size_t* dstCapacityPtr, + const void* src, size_t* srcSizePtr); + +/*-*************************************************************************** +* Streaming decompression howto +* +* A ZBUFF_DCtx object is required to track streaming operations. +* Use ZBUFF_createDCtx() and ZBUFF_freeDCtx() to create/release resources. +* Use ZBUFF_decompressInit() to start a new decompression operation, +* or ZBUFF_decompressInitDictionary() if decompression requires a dictionary. +* Note that ZBUFF_DCtx objects can be re-init multiple times. +* +* Use ZBUFF_decompressContinue() repetitively to consume your input. +* *srcSizePtr and *dstCapacityPtr can be any size. +* The function will report how many bytes were read or written by modifying *srcSizePtr and *dstCapacityPtr. +* Note that it may not consume the entire input, in which case it's up to the caller to present remaining input again. +* The content of `dst` will be overwritten (up to *dstCapacityPtr) at each function call, so save its content if it matters, or change `dst`. +* @return : 0 when a frame is completely decoded and fully flushed, +* 1 when there is still some data left within internal buffer to flush, +* >1 when more data is expected, with value being a suggested next input size (it's just a hint, which helps latency), +* or an error code, which can be tested using ZBUFF_isError(). 
+* +* Hint : recommended buffer sizes (not compulsory) : ZBUFF_recommendedDInSize() and ZBUFF_recommendedDOutSize() +* output : ZBUFF_recommendedDOutSize== 128 KB block size is the internal unit, it ensures it's always possible to write a full block when decoded. +* input : ZBUFF_recommendedDInSize == 128KB + 3; +* just follow indications from ZBUFF_decompressContinue() to minimize latency. It should always be <= 128 KB + 3 . +* *******************************************************************************/ + + +/* ************************************* +* Tool functions +***************************************/ +ZBUFF_DEPRECATED("use ZSTD_isError") unsigned ZBUFF_isError(size_t errorCode); +ZBUFF_DEPRECATED("use ZSTD_getErrorName") const char* ZBUFF_getErrorName(size_t errorCode); + +/** Functions below provide recommended buffer sizes for Compression or Decompression operations. +* These sizes are just hints, they tend to offer better latency */ +ZBUFF_DEPRECATED("use ZSTD_CStreamInSize") size_t ZBUFF_recommendedCInSize(void); +ZBUFF_DEPRECATED("use ZSTD_CStreamOutSize") size_t ZBUFF_recommendedCOutSize(void); +ZBUFF_DEPRECATED("use ZSTD_DStreamInSize") size_t ZBUFF_recommendedDInSize(void); +ZBUFF_DEPRECATED("use ZSTD_DStreamOutSize") size_t ZBUFF_recommendedDOutSize(void); + +#endif /* ZSTD_BUFFERED_H_23987 */ + + +#ifdef ZBUFF_STATIC_LINKING_ONLY +#ifndef ZBUFF_STATIC_H_30298098432 +#define ZBUFF_STATIC_H_30298098432 + +/* ==================================================================================== + * The definitions in this section are considered experimental. + * They should never be used in association with a dynamic library, as they may change in the future. + * They are provided for advanced usages. + * Use them only in association with static linking. 
+ * ==================================================================================== */ + +/*--- Dependency ---*/ +#define ZSTD_STATIC_LINKING_ONLY /* ZSTD_parameters, ZSTD_customMem */ +#include "zstd.h" + + +/*--- Custom memory allocator ---*/ +/*! ZBUFF_createCCtx_advanced() : + * Create a ZBUFF compression context using external alloc and free functions */ +ZBUFF_DEPRECATED("use ZSTD_createCStream_advanced") ZBUFF_CCtx* ZBUFF_createCCtx_advanced(ZSTD_customMem customMem); + +/*! ZBUFF_createDCtx_advanced() : + * Create a ZBUFF decompression context using external alloc and free functions */ +ZBUFF_DEPRECATED("use ZSTD_createDStream_advanced") ZBUFF_DCtx* ZBUFF_createDCtx_advanced(ZSTD_customMem customMem); + + +/*--- Advanced Streaming Initialization ---*/ +ZBUFF_DEPRECATED("use ZSTD_initDStream_usingDict") size_t ZBUFF_compressInit_advanced(ZBUFF_CCtx* zbc, + const void* dict, size_t dictSize, + ZSTD_parameters params, unsigned long long pledgedSrcSize); + + +#endif /* ZBUFF_STATIC_H_30298098432 */ +#endif /* ZBUFF_STATIC_LINKING_ONLY */ + + +#if defined (__cplusplus) +} +#endif diff --git a/vendor/github.com/DataDog/zstd/zbuff_common.c b/vendor/github.com/DataDog/zstd/zbuff_common.c new file mode 100644 index 00000000000..661b9b0e18c --- /dev/null +++ b/vendor/github.com/DataDog/zstd/zbuff_common.c @@ -0,0 +1,26 @@ +/* + * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. 
+ */ + +/*-************************************* +* Dependencies +***************************************/ +#include "error_private.h" +#include "zbuff.h" + +/*-**************************************** +* ZBUFF Error Management (deprecated) +******************************************/ + +/*! ZBUFF_isError() : +* tells if a return value is an error code */ +unsigned ZBUFF_isError(size_t errorCode) { return ERR_isError(errorCode); } +/*! ZBUFF_getErrorName() : +* provides error code string from function result (useful for debugging) */ +const char* ZBUFF_getErrorName(size_t errorCode) { return ERR_getErrorName(errorCode); } diff --git a/vendor/github.com/DataDog/zstd/zbuff_compress.c b/vendor/github.com/DataDog/zstd/zbuff_compress.c new file mode 100644 index 00000000000..f39c60d89f6 --- /dev/null +++ b/vendor/github.com/DataDog/zstd/zbuff_compress.c @@ -0,0 +1,147 @@ +/* + * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + + + +/* ************************************* +* Dependencies +***************************************/ +#define ZBUFF_STATIC_LINKING_ONLY +#include "zbuff.h" + + +/*-*********************************************************** +* Streaming compression +* +* A ZBUFF_CCtx object is required to track streaming operation. +* Use ZBUFF_createCCtx() and ZBUFF_freeCCtx() to create/release resources. +* Use ZBUFF_compressInit() to start a new compression operation. +* ZBUFF_CCtx objects can be reused multiple times. +* +* Use ZBUFF_compressContinue() repetitively to consume your input. +* *srcSizePtr and *dstCapacityPtr can be any size. 
+* The function will report how many bytes were read or written by modifying *srcSizePtr and *dstCapacityPtr. +* Note that it may not consume the entire input, in which case it's up to the caller to call again the function with remaining input. +* The content of dst will be overwritten (up to *dstCapacityPtr) at each function call, so save its content if it matters or change dst . +* @return : a hint to preferred nb of bytes to use as input for next function call (it's only a hint, to improve latency) +* or an error code, which can be tested using ZBUFF_isError(). +* +* ZBUFF_compressFlush() can be used to instruct ZBUFF to compress and output whatever remains within its buffer. +* Note that it will not output more than *dstCapacityPtr. +* Therefore, some content might still be left into its internal buffer if dst buffer is too small. +* @return : nb of bytes still present into internal buffer (0 if it's empty) +* or an error code, which can be tested using ZBUFF_isError(). +* +* ZBUFF_compressEnd() instructs to finish a frame. +* It will perform a flush and write frame epilogue. +* Similar to ZBUFF_compressFlush(), it may not be able to output the entire internal buffer content if *dstCapacityPtr is too small. +* @return : nb of bytes still present into internal buffer (0 if it's empty) +* or an error code, which can be tested using ZBUFF_isError(). +* +* Hint : recommended buffer sizes (not compulsory) +* input : ZSTD_BLOCKSIZE_MAX (128 KB), internal unit size, it improves latency to use this value. +* output : ZSTD_compressBound(ZSTD_BLOCKSIZE_MAX) + ZSTD_blockHeaderSize + ZBUFF_endFrameSize : ensures it's always possible to write/flush/end a full block at best speed. 
+* ***********************************************************/ + +ZBUFF_CCtx* ZBUFF_createCCtx(void) +{ + return ZSTD_createCStream(); +} + +ZBUFF_CCtx* ZBUFF_createCCtx_advanced(ZSTD_customMem customMem) +{ + return ZSTD_createCStream_advanced(customMem); +} + +size_t ZBUFF_freeCCtx(ZBUFF_CCtx* zbc) +{ + return ZSTD_freeCStream(zbc); +} + + +/* ====== Initialization ====== */ + +size_t ZBUFF_compressInit_advanced(ZBUFF_CCtx* zbc, + const void* dict, size_t dictSize, + ZSTD_parameters params, unsigned long long pledgedSrcSize) +{ + if (pledgedSrcSize==0) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN; /* preserve "0 == unknown" behavior */ + return ZSTD_initCStream_advanced(zbc, dict, dictSize, params, pledgedSrcSize); +} + + +size_t ZBUFF_compressInitDictionary(ZBUFF_CCtx* zbc, const void* dict, size_t dictSize, int compressionLevel) +{ + return ZSTD_initCStream_usingDict(zbc, dict, dictSize, compressionLevel); +} + +size_t ZBUFF_compressInit(ZBUFF_CCtx* zbc, int compressionLevel) +{ + return ZSTD_initCStream(zbc, compressionLevel); +} + +/* ====== Compression ====== */ + + +size_t ZBUFF_compressContinue(ZBUFF_CCtx* zbc, + void* dst, size_t* dstCapacityPtr, + const void* src, size_t* srcSizePtr) +{ + size_t result; + ZSTD_outBuffer outBuff; + ZSTD_inBuffer inBuff; + outBuff.dst = dst; + outBuff.pos = 0; + outBuff.size = *dstCapacityPtr; + inBuff.src = src; + inBuff.pos = 0; + inBuff.size = *srcSizePtr; + result = ZSTD_compressStream(zbc, &outBuff, &inBuff); + *dstCapacityPtr = outBuff.pos; + *srcSizePtr = inBuff.pos; + return result; +} + + + +/* ====== Finalize ====== */ + +size_t ZBUFF_compressFlush(ZBUFF_CCtx* zbc, void* dst, size_t* dstCapacityPtr) +{ + size_t result; + ZSTD_outBuffer outBuff; + outBuff.dst = dst; + outBuff.pos = 0; + outBuff.size = *dstCapacityPtr; + result = ZSTD_flushStream(zbc, &outBuff); + *dstCapacityPtr = outBuff.pos; + return result; +} + + +size_t ZBUFF_compressEnd(ZBUFF_CCtx* zbc, void* dst, size_t* dstCapacityPtr) +{ + size_t result; + 
ZSTD_outBuffer outBuff; + outBuff.dst = dst; + outBuff.pos = 0; + outBuff.size = *dstCapacityPtr; + result = ZSTD_endStream(zbc, &outBuff); + *dstCapacityPtr = outBuff.pos; + return result; +} + + + +/* ************************************* +* Tool functions +***************************************/ +size_t ZBUFF_recommendedCInSize(void) { return ZSTD_CStreamInSize(); } +size_t ZBUFF_recommendedCOutSize(void) { return ZSTD_CStreamOutSize(); } diff --git a/vendor/github.com/DataDog/zstd/zbuff_decompress.c b/vendor/github.com/DataDog/zstd/zbuff_decompress.c new file mode 100644 index 00000000000..923c22b73c5 --- /dev/null +++ b/vendor/github.com/DataDog/zstd/zbuff_decompress.c @@ -0,0 +1,75 @@ +/* + * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. 
+ */ + + + +/* ************************************* +* Dependencies +***************************************/ +#define ZBUFF_STATIC_LINKING_ONLY +#include "zbuff.h" + + +ZBUFF_DCtx* ZBUFF_createDCtx(void) +{ + return ZSTD_createDStream(); +} + +ZBUFF_DCtx* ZBUFF_createDCtx_advanced(ZSTD_customMem customMem) +{ + return ZSTD_createDStream_advanced(customMem); +} + +size_t ZBUFF_freeDCtx(ZBUFF_DCtx* zbd) +{ + return ZSTD_freeDStream(zbd); +} + + +/* *** Initialization *** */ + +size_t ZBUFF_decompressInitDictionary(ZBUFF_DCtx* zbd, const void* dict, size_t dictSize) +{ + return ZSTD_initDStream_usingDict(zbd, dict, dictSize); +} + +size_t ZBUFF_decompressInit(ZBUFF_DCtx* zbd) +{ + return ZSTD_initDStream(zbd); +} + + +/* *** Decompression *** */ + +size_t ZBUFF_decompressContinue(ZBUFF_DCtx* zbd, + void* dst, size_t* dstCapacityPtr, + const void* src, size_t* srcSizePtr) +{ + ZSTD_outBuffer outBuff; + ZSTD_inBuffer inBuff; + size_t result; + outBuff.dst = dst; + outBuff.pos = 0; + outBuff.size = *dstCapacityPtr; + inBuff.src = src; + inBuff.pos = 0; + inBuff.size = *srcSizePtr; + result = ZSTD_decompressStream(zbd, &outBuff, &inBuff); + *dstCapacityPtr = outBuff.pos; + *srcSizePtr = inBuff.pos; + return result; +} + + +/* ************************************* +* Tool functions +***************************************/ +size_t ZBUFF_recommendedDInSize(void) { return ZSTD_DStreamInSize(); } +size_t ZBUFF_recommendedDOutSize(void) { return ZSTD_DStreamOutSize(); } diff --git a/vendor/github.com/DataDog/zstd/zdict.c b/vendor/github.com/DataDog/zstd/zdict.c new file mode 100644 index 00000000000..7d24e499181 --- /dev/null +++ b/vendor/github.com/DataDog/zstd/zdict.c @@ -0,0 +1,1108 @@ +/* + * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * All rights reserved. 
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+
+/*-**************************************
+*  Tuning parameters
+****************************************/
+#define MINRATIO 4   /* minimum nb of apparition to be selected in dictionary */
+#define ZDICT_MAX_SAMPLES_SIZE (2000U << 20)
+#define ZDICT_MIN_SAMPLES_SIZE (ZDICT_CONTENTSIZE_MIN * MINRATIO)
+
+
+/*-**************************************
+*  Compiler Options
+****************************************/
+/* Unix Large Files support (>4GB) */
+#define _FILE_OFFSET_BITS 64
+#if (defined(__sun__) && (!defined(__LP64__)))   /* Sun Solaris 32-bits requires specific definitions */
+#  define _LARGEFILE_SOURCE
+#elif ! defined(__LP64__)   /* No point defining Large file for 64 bit */
+#  define _LARGEFILE64_SOURCE
+#endif
+
+
+/*-*************************************
+*  Dependencies
+***************************************/
+#include <stdlib.h>        /* malloc, free */
+#include <string.h>        /* memset */
+#include <stdio.h>         /* fprintf, fopen, ftello64 */
+#include <time.h>          /* clock */
+
+#include "mem.h"           /* read */
+#include "fse.h"           /* FSE_normalizeCount, FSE_writeNCount */
+#define HUF_STATIC_LINKING_ONLY
+#include "huf.h"           /* HUF_buildCTable, HUF_writeCTable */
+#include "zstd_internal.h" /* includes zstd.h */
+#include "xxhash.h"        /* XXH64 */
+#include "divsufsort.h"
+#ifndef ZDICT_STATIC_LINKING_ONLY
+#  define ZDICT_STATIC_LINKING_ONLY
+#endif
+#include "zdict.h"
+
+
+/*-*************************************
+*  Constants
+***************************************/
+#define KB *(1 <<10)
+#define MB *(1 <<20)
+#define GB *(1U<<30)
+
+#define DICTLISTSIZE_DEFAULT 10000
+
+#define NOISELENGTH 32
+
+static const int g_compressionLevel_default = 3;
+static const U32 g_selectivity_default = 9;
+
+
+/*-*************************************
+*  Console display
+***************************************/
+#define DISPLAY(...)         { fprintf(stderr, __VA_ARGS__); fflush( stderr ); }
+#define DISPLAYLEVEL(l, ...) if (notificationLevel>=l) { DISPLAY(__VA_ARGS__); }    /* 0 : no display;   1: errors;   2: default;  3: details;  4: debug */
+
+static clock_t ZDICT_clockSpan(clock_t nPrevious) { return clock() - nPrevious; }
+
+static void ZDICT_printHex(const void* ptr, size_t length)
+{
+    const BYTE* const b = (const BYTE*)ptr;
+    size_t u;
+    for (u=0; u<length; u++) {
+        BYTE c = b[u];
+        if (c<32 || c>126) c = '.';   /* non-printable char */
+        DISPLAY("%c", c);
+    }
+}
+
+
+/*-********************************************************
+*  Helper functions
+**********************************************************/
+unsigned ZDICT_isError(size_t errorCode) { return ERR_isError(errorCode); }
+
+const char* ZDICT_getErrorName(size_t errorCode) { return ERR_getErrorName(errorCode); }
+
+unsigned ZDICT_getDictID(const void* dictBuffer, size_t dictSize)
+{
+    if (dictSize < 8) return 0;
+    if (MEM_readLE32(dictBuffer) != ZSTD_MAGIC_DICTIONARY) return 0;
+    return MEM_readLE32((const char*)dictBuffer + 4);
+}
+
+
+/*-********************************************************
+*  Dictionary training functions
+**********************************************************/
+static unsigned ZDICT_NbCommonBytes (size_t val)
+{
+    if (MEM_isLittleEndian()) {
+        if (MEM_64bits()) {
+#       if defined(_MSC_VER) && defined(_WIN64)
+            unsigned long r = 0;
+            _BitScanForward64( &r, (U64)val );
+            return (unsigned)(r>>3);
+#       elif defined(__GNUC__) && (__GNUC__ >= 3)
+            return (__builtin_ctzll((U64)val) >> 3);
+#       else
+            static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5, 3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5, 5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4, 4, 5, 7, 2, 6, 5, 7, 6, 7, 7 };
+            return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58];
+#       endif
+        } else { /* 32 bits */
+# if defined(_MSC_VER) + unsigned long r=0; + _BitScanForward( &r, (U32)val ); + return (unsigned)(r>>3); +# elif defined(__GNUC__) && (__GNUC__ >= 3) + return (__builtin_ctz((U32)val) >> 3); +# else + static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1 }; + return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27]; +# endif + } + } else { /* Big Endian CPU */ + if (MEM_64bits()) { +# if defined(_MSC_VER) && defined(_WIN64) + unsigned long r = 0; + _BitScanReverse64( &r, val ); + return (unsigned)(r>>3); +# elif defined(__GNUC__) && (__GNUC__ >= 3) + return (__builtin_clzll(val) >> 3); +# else + unsigned r; + const unsigned n32 = sizeof(size_t)*4; /* calculate this way due to compiler complaining in 32-bits mode */ + if (!(val>>n32)) { r=4; } else { r=0; val>>=n32; } + if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; } + r += (!val); + return r; +# endif + } else { /* 32 bits */ +# if defined(_MSC_VER) + unsigned long r = 0; + _BitScanReverse( &r, (unsigned long)val ); + return (unsigned)(r>>3); +# elif defined(__GNUC__) && (__GNUC__ >= 3) + return (__builtin_clz((U32)val) >> 3); +# else + unsigned r; + if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; } + r += (!val); + return r; +# endif + } } +} + + +/*! ZDICT_count() : + Count the nb of common bytes between 2 pointers. + Note : this function presumes end of buffer followed by noisy guard band. 
+*/ +static size_t ZDICT_count(const void* pIn, const void* pMatch) +{ + const char* const pStart = (const char*)pIn; + for (;;) { + size_t const diff = MEM_readST(pMatch) ^ MEM_readST(pIn); + if (!diff) { + pIn = (const char*)pIn+sizeof(size_t); + pMatch = (const char*)pMatch+sizeof(size_t); + continue; + } + pIn = (const char*)pIn+ZDICT_NbCommonBytes(diff); + return (size_t)((const char*)pIn - pStart); + } +} + + +typedef struct { + U32 pos; + U32 length; + U32 savings; +} dictItem; + +static void ZDICT_initDictItem(dictItem* d) +{ + d->pos = 1; + d->length = 0; + d->savings = (U32)(-1); +} + + +#define LLIMIT 64 /* heuristic determined experimentally */ +#define MINMATCHLENGTH 7 /* heuristic determined experimentally */ +static dictItem ZDICT_analyzePos( + BYTE* doneMarks, + const int* suffix, U32 start, + const void* buffer, U32 minRatio, U32 notificationLevel) +{ + U32 lengthList[LLIMIT] = {0}; + U32 cumulLength[LLIMIT] = {0}; + U32 savings[LLIMIT] = {0}; + const BYTE* b = (const BYTE*)buffer; + size_t maxLength = LLIMIT; + size_t pos = suffix[start]; + U32 end = start; + dictItem solution; + + /* init */ + memset(&solution, 0, sizeof(solution)); + doneMarks[pos] = 1; + + /* trivial repetition cases */ + if ( (MEM_read16(b+pos+0) == MEM_read16(b+pos+2)) + ||(MEM_read16(b+pos+1) == MEM_read16(b+pos+3)) + ||(MEM_read16(b+pos+2) == MEM_read16(b+pos+4)) ) { + /* skip and mark segment */ + U16 const pattern16 = MEM_read16(b+pos+4); + U32 u, patternEnd = 6; + while (MEM_read16(b+pos+patternEnd) == pattern16) patternEnd+=2 ; + if (b[pos+patternEnd] == b[pos+patternEnd-1]) patternEnd++; + for (u=1; u= MINMATCHLENGTH); + } + + /* look backward */ + { size_t length; + do { + length = ZDICT_count(b + pos, b + *(suffix+start-1)); + if (length >=MINMATCHLENGTH) start--; + } while(length >= MINMATCHLENGTH); + } + + /* exit if not found a minimum nb of repetitions */ + if (end-start < minRatio) { + U32 idx; + for(idx=start; idx= %i at pos %7u ", (U32)(end-start), 
MINMATCHLENGTH, (U32)pos); + DISPLAYLEVEL(4, "\n"); + + for (searchLength = MINMATCHLENGTH ; ; searchLength++) { + BYTE currentChar = 0; + U32 currentCount = 0; + U32 currentID = refinedStart; + U32 id; + U32 selectedCount = 0; + U32 selectedID = currentID; + for (id =refinedStart; id < refinedEnd; id++) { + if (b[suffix[id] + searchLength] != currentChar) { + if (currentCount > selectedCount) { + selectedCount = currentCount; + selectedID = currentID; + } + currentID = id; + currentChar = b[ suffix[id] + searchLength]; + currentCount = 0; + } + currentCount ++; + } + if (currentCount > selectedCount) { /* for last */ + selectedCount = currentCount; + selectedID = currentID; + } + + if (selectedCount < minRatio) + break; + refinedStart = selectedID; + refinedEnd = refinedStart + selectedCount; + } + + /* evaluate gain based on new ref */ + start = refinedStart; + pos = suffix[refinedStart]; + end = start; + memset(lengthList, 0, sizeof(lengthList)); + + /* look forward */ + { size_t length; + do { + end++; + length = ZDICT_count(b + pos, b + suffix[end]); + if (length >= LLIMIT) length = LLIMIT-1; + lengthList[length]++; + } while (length >=MINMATCHLENGTH); + } + + /* look backward */ + { size_t length = MINMATCHLENGTH; + while ((length >= MINMATCHLENGTH) & (start > 0)) { + length = ZDICT_count(b + pos, b + suffix[start - 1]); + if (length >= LLIMIT) length = LLIMIT - 1; + lengthList[length]++; + if (length >= MINMATCHLENGTH) start--; + } + } + + /* largest useful length */ + memset(cumulLength, 0, sizeof(cumulLength)); + cumulLength[maxLength-1] = lengthList[maxLength-1]; + for (i=(int)(maxLength-2); i>=0; i--) + cumulLength[i] = cumulLength[i+1] + lengthList[i]; + + for (i=LLIMIT-1; i>=MINMATCHLENGTH; i--) if (cumulLength[i]>=minRatio) break; + maxLength = i; + + /* reduce maxLength in case of final into repetitive data */ + { U32 l = (U32)maxLength; + BYTE const c = b[pos + maxLength-1]; + while (b[pos+l-2]==c) l--; + maxLength = l; + } + if (maxLength < 
MINMATCHLENGTH) return solution; /* skip : no long-enough solution */ + + /* calculate savings */ + savings[5] = 0; + for (i=MINMATCHLENGTH; i<=(int)maxLength; i++) + savings[i] = savings[i-1] + (lengthList[i] * (i-3)); + + DISPLAYLEVEL(4, "Selected ref at position %u, of length %u : saves %u (ratio: %.2f) \n", + (U32)pos, (U32)maxLength, savings[maxLength], (double)savings[maxLength] / maxLength); + + solution.pos = (U32)pos; + solution.length = (U32)maxLength; + solution.savings = savings[maxLength]; + + /* mark positions done */ + { U32 id; + for (id=start; id solution.length) length = solution.length; + } + pEnd = (U32)(testedPos + length); + for (p=testedPos; ppos; + const U32 eltEnd = elt.pos + elt.length; + const char* const buf = (const char*) buffer; + + /* tail overlap */ + U32 u; for (u=1; u elt.pos) && (table[u].pos <= eltEnd)) { /* overlap, existing > new */ + /* append */ + U32 const addedLength = table[u].pos - elt.pos; + table[u].length += addedLength; + table[u].pos = elt.pos; + table[u].savings += elt.savings * addedLength / elt.length; /* rough approx */ + table[u].savings += elt.length / 8; /* rough approx bonus */ + elt = table[u]; + /* sort : improve rank */ + while ((u>1) && (table[u-1].savings < elt.savings)) + table[u] = table[u-1], u--; + table[u] = elt; + return u; + } } + + /* front overlap */ + for (u=1; u= elt.pos) && (table[u].pos < elt.pos)) { /* overlap, existing < new */ + /* append */ + int const addedLength = (int)eltEnd - (table[u].pos + table[u].length); + table[u].savings += elt.length / 8; /* rough approx bonus */ + if (addedLength > 0) { /* otherwise, elt fully included into existing */ + table[u].length += addedLength; + table[u].savings += elt.savings * addedLength / elt.length; /* rough approx */ + } + /* sort : improve rank */ + elt = table[u]; + while ((u>1) && (table[u-1].savings < elt.savings)) + table[u] = table[u-1], u--; + table[u] = elt; + return u; + } + + if (MEM_read64(buf + table[u].pos) == MEM_read64(buf + 
elt.pos + 1)) { + if (isIncluded(buf + table[u].pos, buf + elt.pos + 1, table[u].length)) { + size_t const addedLength = MAX( (int)elt.length - (int)table[u].length , 1 ); + table[u].pos = elt.pos; + table[u].savings += (U32)(elt.savings * addedLength / elt.length); + table[u].length = MIN(elt.length, table[u].length + 1); + return u; + } + } + } + + return 0; +} + + +static void ZDICT_removeDictItem(dictItem* table, U32 id) +{ + /* convention : table[0].pos stores nb of elts */ + U32 const max = table[0].pos; + U32 u; + if (!id) return; /* protection, should never happen */ + for (u=id; upos--; +} + + +static void ZDICT_insertDictItem(dictItem* table, U32 maxSize, dictItem elt, const void* buffer) +{ + /* merge if possible */ + U32 mergeId = ZDICT_tryMerge(table, elt, 0, buffer); + if (mergeId) { + U32 newMerge = 1; + while (newMerge) { + newMerge = ZDICT_tryMerge(table, table[mergeId], mergeId, buffer); + if (newMerge) ZDICT_removeDictItem(table, mergeId); + mergeId = newMerge; + } + return; + } + + /* insert */ + { U32 current; + U32 nextElt = table->pos; + if (nextElt >= maxSize) nextElt = maxSize-1; + current = nextElt-1; + while (table[current].savings < elt.savings) { + table[current+1] = table[current]; + current--; + } + table[current+1] = elt; + table->pos = nextElt+1; + } +} + + +static U32 ZDICT_dictSize(const dictItem* dictList) +{ + U32 u, dictSize = 0; + for (u=1; u=l) { \ + if (ZDICT_clockSpan(displayClock) > refreshRate) \ + { displayClock = clock(); DISPLAY(__VA_ARGS__); \ + if (notificationLevel>=4) fflush(stderr); } } + + /* init */ + DISPLAYLEVEL(2, "\r%70s\r", ""); /* clean display line */ + if (!suffix0 || !reverseSuffix || !doneMarks || !filePos) { + result = ERROR(memory_allocation); + goto _cleanup; + } + if (minRatio < MINRATIO) minRatio = MINRATIO; + memset(doneMarks, 0, bufferSize+16); + + /* limit sample set size (divsufsort limitation)*/ + if (bufferSize > ZDICT_MAX_SAMPLES_SIZE) DISPLAYLEVEL(3, "sample set too large : reduced to %u 
MB ...\n", (U32)(ZDICT_MAX_SAMPLES_SIZE>>20)); + while (bufferSize > ZDICT_MAX_SAMPLES_SIZE) bufferSize -= fileSizes[--nbFiles]; + + /* sort */ + DISPLAYLEVEL(2, "sorting %u files of total size %u MB ...\n", nbFiles, (U32)(bufferSize>>20)); + { int const divSuftSortResult = divsufsort((const unsigned char*)buffer, suffix, (int)bufferSize, 0); + if (divSuftSortResult != 0) { result = ERROR(GENERIC); goto _cleanup; } + } + suffix[bufferSize] = (int)bufferSize; /* leads into noise */ + suffix0[0] = (int)bufferSize; /* leads into noise */ + /* build reverse suffix sort */ + { size_t pos; + for (pos=0; pos < bufferSize; pos++) + reverseSuffix[suffix[pos]] = (U32)pos; + /* note filePos tracks borders between samples. + It's not used at this stage, but planned to become useful in a later update */ + filePos[0] = 0; + for (pos=1; pos> 21); + } +} + + +typedef struct +{ + ZSTD_CCtx* ref; /* contains reference to dictionary */ + ZSTD_CCtx* zc; /* working context */ + void* workPlace; /* must be ZSTD_BLOCKSIZE_MAX allocated */ +} EStats_ress_t; + +#define MAXREPOFFSET 1024 + +static void ZDICT_countEStats(EStats_ress_t esr, ZSTD_parameters params, + U32* countLit, U32* offsetcodeCount, U32* matchlengthCount, U32* litlengthCount, U32* repOffsets, + const void* src, size_t srcSize, + U32 notificationLevel) +{ + size_t const blockSizeMax = MIN (ZSTD_BLOCKSIZE_MAX, 1 << params.cParams.windowLog); + size_t cSize; + + if (srcSize > blockSizeMax) srcSize = blockSizeMax; /* protection vs large samples */ + { size_t const errorCode = ZSTD_copyCCtx(esr.zc, esr.ref, 0); + if (ZSTD_isError(errorCode)) { DISPLAYLEVEL(1, "warning : ZSTD_copyCCtx failed \n"); return; } + } + cSize = ZSTD_compressBlock(esr.zc, esr.workPlace, ZSTD_BLOCKSIZE_MAX, src, srcSize); + if (ZSTD_isError(cSize)) { DISPLAYLEVEL(3, "warning : could not compress sample size %u \n", (U32)srcSize); return; } + + if (cSize) { /* if == 0; block is not compressible */ + const seqStore_t* const seqStorePtr = 
ZSTD_getSeqStore(esr.zc); + + /* literals stats */ + { const BYTE* bytePtr; + for(bytePtr = seqStorePtr->litStart; bytePtr < seqStorePtr->lit; bytePtr++) + countLit[*bytePtr]++; + } + + /* seqStats */ + { U32 const nbSeq = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart); + ZSTD_seqToCodes(seqStorePtr); + + { const BYTE* codePtr = seqStorePtr->ofCode; + U32 u; + for (u=0; umlCode; + U32 u; + for (u=0; ullCode; + U32 u; + for (u=0; u= 2) { /* rep offsets */ + const seqDef* const seq = seqStorePtr->sequencesStart; + U32 offset1 = seq[0].offset - 3; + U32 offset2 = seq[1].offset - 3; + if (offset1 >= MAXREPOFFSET) offset1 = 0; + if (offset2 >= MAXREPOFFSET) offset2 = 0; + repOffsets[offset1] += 3; + repOffsets[offset2] += 1; + } } } +} + +static size_t ZDICT_totalSampleSize(const size_t* fileSizes, unsigned nbFiles) +{ + size_t total=0; + unsigned u; + for (u=0; u0; u--) { + offsetCount_t tmp; + if (table[u-1].count >= table[u].count) break; + tmp = table[u-1]; + table[u-1] = table[u]; + table[u] = tmp; + } +} + +/* ZDICT_flatLit() : + * rewrite `countLit` to contain a mostly flat but still compressible distribution of literals. + * necessary to avoid generating a non-compressible distribution that HUF_writeCTable() cannot encode. 
+ */ +static void ZDICT_flatLit(U32* countLit) +{ + int u; + for (u=1; u<256; u++) countLit[u] = 2; + countLit[0] = 4; + countLit[253] = 1; + countLit[254] = 1; +} + +#define OFFCODE_MAX 30 /* only applicable to first block */ +static size_t ZDICT_analyzeEntropy(void* dstBuffer, size_t maxDstSize, + unsigned compressionLevel, + const void* srcBuffer, const size_t* fileSizes, unsigned nbFiles, + const void* dictBuffer, size_t dictBufferSize, + unsigned notificationLevel) +{ + U32 countLit[256]; + HUF_CREATE_STATIC_CTABLE(hufTable, 255); + U32 offcodeCount[OFFCODE_MAX+1]; + short offcodeNCount[OFFCODE_MAX+1]; + U32 offcodeMax = ZSTD_highbit32((U32)(dictBufferSize + 128 KB)); + U32 matchLengthCount[MaxML+1]; + short matchLengthNCount[MaxML+1]; + U32 litLengthCount[MaxLL+1]; + short litLengthNCount[MaxLL+1]; + U32 repOffset[MAXREPOFFSET]; + offsetCount_t bestRepOffset[ZSTD_REP_NUM+1]; + EStats_ress_t esr; + ZSTD_parameters params; + U32 u, huffLog = 11, Offlog = OffFSELog, mlLog = MLFSELog, llLog = LLFSELog, total; + size_t pos = 0, errorCode; + size_t eSize = 0; + size_t const totalSrcSize = ZDICT_totalSampleSize(fileSizes, nbFiles); + size_t const averageSampleSize = totalSrcSize / (nbFiles + !nbFiles); + BYTE* dstPtr = (BYTE*)dstBuffer; + + /* init */ + DEBUGLOG(4, "ZDICT_analyzeEntropy"); + esr.ref = ZSTD_createCCtx(); + esr.zc = ZSTD_createCCtx(); + esr.workPlace = malloc(ZSTD_BLOCKSIZE_MAX); + if (!esr.ref || !esr.zc || !esr.workPlace) { + eSize = ERROR(memory_allocation); + DISPLAYLEVEL(1, "Not enough memory \n"); + goto _cleanup; + } + if (offcodeMax>OFFCODE_MAX) { eSize = ERROR(dictionaryCreation_failed); goto _cleanup; } /* too large dictionary */ + for (u=0; u<256; u++) countLit[u] = 1; /* any character must be described */ + for (u=0; u<=offcodeMax; u++) offcodeCount[u] = 1; + for (u=0; u<=MaxML; u++) matchLengthCount[u] = 1; + for (u=0; u<=MaxLL; u++) litLengthCount[u] = 1; + memset(repOffset, 0, sizeof(repOffset)); + repOffset[1] = repOffset[4] = 
repOffset[8] = 1; + memset(bestRepOffset, 0, sizeof(bestRepOffset)); + if (compressionLevel<=0) compressionLevel = g_compressionLevel_default; + params = ZSTD_getParams(compressionLevel, averageSampleSize, dictBufferSize); + { size_t const beginResult = ZSTD_compressBegin_advanced(esr.ref, dictBuffer, dictBufferSize, params, 0); + if (ZSTD_isError(beginResult)) { + DISPLAYLEVEL(1, "error : ZSTD_compressBegin_advanced() failed : %s \n", ZSTD_getErrorName(beginResult)); + eSize = ERROR(GENERIC); + goto _cleanup; + } } + + /* collect stats on all samples */ + for (u=0; u dictBufferCapacity) dictContentSize = dictBufferCapacity - hSize; + { size_t const dictSize = hSize + dictContentSize; + char* dictEnd = (char*)dictBuffer + dictSize; + memmove(dictEnd - dictContentSize, customDictContent, dictContentSize); + memcpy(dictBuffer, header, hSize); + return dictSize; + } +} + + +size_t ZDICT_addEntropyTablesFromBuffer_advanced(void* dictBuffer, size_t dictContentSize, size_t dictBufferCapacity, + const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples, + ZDICT_params_t params) +{ + int const compressionLevel = (params.compressionLevel <= 0) ? g_compressionLevel_default : params.compressionLevel; + U32 const notificationLevel = params.notificationLevel; + size_t hSize = 8; + + /* calculate entropy tables */ + DISPLAYLEVEL(2, "\r%70s\r", ""); /* clean display line */ + DISPLAYLEVEL(2, "statistics ... 
\n"); + { size_t const eSize = ZDICT_analyzeEntropy((char*)dictBuffer+hSize, dictBufferCapacity-hSize, + compressionLevel, + samplesBuffer, samplesSizes, nbSamples, + (char*)dictBuffer + dictBufferCapacity - dictContentSize, dictContentSize, + notificationLevel); + if (ZDICT_isError(eSize)) return eSize; + hSize += eSize; + } + + /* add dictionary header (after entropy tables) */ + MEM_writeLE32(dictBuffer, ZSTD_MAGIC_DICTIONARY); + { U64 const randomID = XXH64((char*)dictBuffer + dictBufferCapacity - dictContentSize, dictContentSize, 0); + U32 const compliantID = (randomID % ((1U<<31)-32768)) + 32768; + U32 const dictID = params.dictID ? params.dictID : compliantID; + MEM_writeLE32((char*)dictBuffer+4, dictID); + } + + if (hSize + dictContentSize < dictBufferCapacity) + memmove((char*)dictBuffer + hSize, (char*)dictBuffer + dictBufferCapacity - dictContentSize, dictContentSize); + return MIN(dictBufferCapacity, hSize+dictContentSize); +} + + +/*! ZDICT_trainFromBuffer_unsafe_legacy() : +* Warning : `samplesBuffer` must be followed by noisy guard band. +* @return : size of dictionary, or an error code which can be tested with ZDICT_isError() +*/ +size_t ZDICT_trainFromBuffer_unsafe_legacy( + void* dictBuffer, size_t maxDictSize, + const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples, + ZDICT_legacy_params_t params) +{ + U32 const dictListSize = MAX(MAX(DICTLISTSIZE_DEFAULT, nbSamples), (U32)(maxDictSize/16)); + dictItem* const dictList = (dictItem*)malloc(dictListSize * sizeof(*dictList)); + unsigned const selectivity = params.selectivityLevel == 0 ? g_selectivity_default : params.selectivityLevel; + unsigned const minRep = (selectivity > 30) ? 
MINRATIO : nbSamples >> selectivity; + size_t const targetDictSize = maxDictSize; + size_t const samplesBuffSize = ZDICT_totalSampleSize(samplesSizes, nbSamples); + size_t dictSize = 0; + U32 const notificationLevel = params.zParams.notificationLevel; + + /* checks */ + if (!dictList) return ERROR(memory_allocation); + if (maxDictSize < ZDICT_DICTSIZE_MIN) { free(dictList); return ERROR(dstSize_tooSmall); } /* requested dictionary size is too small */ + if (samplesBuffSize < ZDICT_MIN_SAMPLES_SIZE) { free(dictList); return ERROR(dictionaryCreation_failed); } /* not enough source to create dictionary */ + + /* init */ + ZDICT_initDictItem(dictList); + + /* build dictionary */ + ZDICT_trainBuffer_legacy(dictList, dictListSize, + samplesBuffer, samplesBuffSize, + samplesSizes, nbSamples, + minRep, notificationLevel); + + /* display best matches */ + if (params.zParams.notificationLevel>= 3) { + U32 const nb = MIN(25, dictList[0].pos); + U32 const dictContentSize = ZDICT_dictSize(dictList); + U32 u; + DISPLAYLEVEL(3, "\n %u segments found, of total size %u \n", dictList[0].pos-1, dictContentSize); + DISPLAYLEVEL(3, "list %u best segments \n", nb-1); + for (u=1; u samplesBuffSize) || ((pos + length) > samplesBuffSize)) + return ERROR(GENERIC); /* should never happen */ + DISPLAYLEVEL(3, "%3u:%3u bytes at pos %8u, savings %7u bytes |", + u, length, pos, dictList[u].savings); + ZDICT_printHex((const char*)samplesBuffer+pos, printedLength); + DISPLAYLEVEL(3, "| \n"); + } } + + + /* create dictionary */ + { U32 dictContentSize = ZDICT_dictSize(dictList); + if (dictContentSize < ZDICT_CONTENTSIZE_MIN) { free(dictList); return ERROR(dictionaryCreation_failed); } /* dictionary content too small */ + if (dictContentSize < targetDictSize/4) { + DISPLAYLEVEL(2, "! warning : selected content significantly smaller than requested (%u < %u) \n", dictContentSize, (U32)maxDictSize); + if (samplesBuffSize < 10 * targetDictSize) + DISPLAYLEVEL(2, "! 
consider increasing the number of samples (total size : %u MB)\n", (U32)(samplesBuffSize>>20)); + if (minRep > MINRATIO) { + DISPLAYLEVEL(2, "! consider increasing selectivity to produce larger dictionary (-s%u) \n", selectivity+1); + DISPLAYLEVEL(2, "! note : larger dictionaries are not necessarily better, test its efficiency on samples \n"); + } + } + + if ((dictContentSize > targetDictSize*3) && (nbSamples > 2*MINRATIO) && (selectivity>1)) { + U32 proposedSelectivity = selectivity-1; + while ((nbSamples >> proposedSelectivity) <= MINRATIO) { proposedSelectivity--; } + DISPLAYLEVEL(2, "! note : calculated dictionary significantly larger than requested (%u > %u) \n", dictContentSize, (U32)maxDictSize); + DISPLAYLEVEL(2, "! consider increasing dictionary size, or produce denser dictionary (-s%u) \n", proposedSelectivity); + DISPLAYLEVEL(2, "! always test dictionary efficiency on real samples \n"); + } + + /* limit dictionary size */ + { U32 const max = dictList->pos; /* convention : nb of useful elts within dictList */ + U32 currentSize = 0; + U32 n; for (n=1; n targetDictSize) { currentSize -= dictList[n].length; break; } + } + dictList->pos = n; + dictContentSize = currentSize; + } + + /* build dict content */ + { U32 u; + BYTE* ptr = (BYTE*)dictBuffer + maxDictSize; + for (u=1; upos; u++) { + U32 l = dictList[u].length; + ptr -= l; + if (ptr<(BYTE*)dictBuffer) { free(dictList); return ERROR(GENERIC); } /* should not happen */ + memcpy(ptr, (const char*)samplesBuffer+dictList[u].pos, l); + } } + + dictSize = ZDICT_addEntropyTablesFromBuffer_advanced(dictBuffer, dictContentSize, maxDictSize, + samplesBuffer, samplesSizes, nbSamples, + params.zParams); + } + + /* clean up */ + free(dictList); + return dictSize; +} + + +/* ZDICT_trainFromBuffer_legacy() : + * issue : samplesBuffer need to be followed by a noisy guard band. 
+ * work around : duplicate the buffer, and add the noise */ +size_t ZDICT_trainFromBuffer_legacy(void* dictBuffer, size_t dictBufferCapacity, + const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples, + ZDICT_legacy_params_t params) +{ + size_t result; + void* newBuff; + size_t const sBuffSize = ZDICT_totalSampleSize(samplesSizes, nbSamples); + if (sBuffSize < ZDICT_MIN_SAMPLES_SIZE) return 0; /* not enough content => no dictionary */ + + newBuff = malloc(sBuffSize + NOISELENGTH); + if (!newBuff) return ERROR(memory_allocation); + + memcpy(newBuff, samplesBuffer, sBuffSize); + ZDICT_fillNoise((char*)newBuff + sBuffSize, NOISELENGTH); /* guard band, for end of buffer condition */ + + result = + ZDICT_trainFromBuffer_unsafe_legacy(dictBuffer, dictBufferCapacity, newBuff, + samplesSizes, nbSamples, params); + free(newBuff); + return result; +} + + +size_t ZDICT_trainFromBuffer(void* dictBuffer, size_t dictBufferCapacity, + const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples) +{ + ZDICT_cover_params_t params; + DEBUGLOG(3, "ZDICT_trainFromBuffer"); + memset(¶ms, 0, sizeof(params)); + params.d = 8; + params.steps = 4; + /* Default to level 6 since no compression level information is available */ + params.zParams.compressionLevel = 6; +#if defined(ZSTD_DEBUG) && (ZSTD_DEBUG>=1) + params.zParams.notificationLevel = ZSTD_DEBUG; +#endif + return ZDICT_optimizeTrainFromBuffer_cover(dictBuffer, dictBufferCapacity, + samplesBuffer, samplesSizes, nbSamples, + ¶ms); +} + +size_t ZDICT_addEntropyTablesFromBuffer(void* dictBuffer, size_t dictContentSize, size_t dictBufferCapacity, + const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples) +{ + ZDICT_params_t params; + memset(¶ms, 0, sizeof(params)); + return ZDICT_addEntropyTablesFromBuffer_advanced(dictBuffer, dictContentSize, dictBufferCapacity, + samplesBuffer, samplesSizes, nbSamples, + params); +} diff --git a/vendor/github.com/DataDog/zstd/zdict.h 
b/vendor/github.com/DataDog/zstd/zdict.h new file mode 100644 index 00000000000..ad459c2d7d5 --- /dev/null +++ b/vendor/github.com/DataDog/zstd/zdict.h @@ -0,0 +1,212 @@ +/* + * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + +#ifndef DICTBUILDER_H_001 +#define DICTBUILDER_H_001 + +#if defined (__cplusplus) +extern "C" { +#endif + + +/*====== Dependencies ======*/ +#include /* size_t */ + + +/* ===== ZDICTLIB_API : control library symbols visibility ===== */ +#ifndef ZDICTLIB_VISIBILITY +# if defined(__GNUC__) && (__GNUC__ >= 4) +# define ZDICTLIB_VISIBILITY __attribute__ ((visibility ("default"))) +# else +# define ZDICTLIB_VISIBILITY +# endif +#endif +#if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1) +# define ZDICTLIB_API __declspec(dllexport) ZDICTLIB_VISIBILITY +#elif defined(ZSTD_DLL_IMPORT) && (ZSTD_DLL_IMPORT==1) +# define ZDICTLIB_API __declspec(dllimport) ZDICTLIB_VISIBILITY /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/ +#else +# define ZDICTLIB_API ZDICTLIB_VISIBILITY +#endif + + +/*! ZDICT_trainFromBuffer(): + * Train a dictionary from an array of samples. + * Redirect towards ZDICT_optimizeTrainFromBuffer_cover() single-threaded, with d=8 and steps=4. + * Samples must be stored concatenated in a single flat buffer `samplesBuffer`, + * supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order. + * The resulting dictionary will be saved into `dictBuffer`. 
+ * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`) + * or an error code, which can be tested with ZDICT_isError(). + * Note: ZDICT_trainFromBuffer() requires about 9 bytes of memory for each input byte. + * Tips: In general, a reasonable dictionary has a size of ~ 100 KB. + * It's possible to select smaller or larger size, just by specifying `dictBufferCapacity`. + * In general, it's recommended to provide a few thousands samples, though this can vary a lot. + * It's recommended that total size of all samples be about ~x100 times the target size of dictionary. + */ +ZDICTLIB_API size_t ZDICT_trainFromBuffer(void* dictBuffer, size_t dictBufferCapacity, + const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples); + + +/*====== Helper functions ======*/ +ZDICTLIB_API unsigned ZDICT_getDictID(const void* dictBuffer, size_t dictSize); /**< extracts dictID; @return zero if error (not a valid dictionary) */ +ZDICTLIB_API unsigned ZDICT_isError(size_t errorCode); +ZDICTLIB_API const char* ZDICT_getErrorName(size_t errorCode); + + + +#ifdef ZDICT_STATIC_LINKING_ONLY + +/* ==================================================================================== + * The definitions in this section are considered experimental. + * They should never be used with a dynamic library, as they may change in the future. + * They are provided for advanced usages. + * Use them only in association with static linking. + * ==================================================================================== */ + +typedef struct { + int compressionLevel; /* optimize for a specific zstd compression level; 0 means default */ + unsigned notificationLevel; /* Write log to stderr; 0 = none (default); 1 = errors; 2 = progression; 3 = details; 4 = debug; */ + unsigned dictID; /* force dictID value; 0 means auto mode (32-bits random value) */ +} ZDICT_params_t; + +/*! ZDICT_cover_params_t: + * k and d are the only required parameters. 
+ * For others, value 0 means default. + */ +typedef struct { + unsigned k; /* Segment size : constraint: 0 < k : Reasonable range [16, 2048+] */ + unsigned d; /* dmer size : constraint: 0 < d <= k : Reasonable range [6, 16] */ + unsigned steps; /* Number of steps : Only used for optimization : 0 means default (32) : Higher means more parameters checked */ + unsigned nbThreads; /* Number of threads : constraint: 0 < nbThreads : 1 means single-threaded : Only used for optimization : Ignored if ZSTD_MULTITHREAD is not defined */ + ZDICT_params_t zParams; +} ZDICT_cover_params_t; + + +/*! ZDICT_trainFromBuffer_cover(): + * Train a dictionary from an array of samples using the COVER algorithm. + * Samples must be stored concatenated in a single flat buffer `samplesBuffer`, + * supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order. + * The resulting dictionary will be saved into `dictBuffer`. + * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`) + * or an error code, which can be tested with ZDICT_isError(). + * Note: ZDICT_trainFromBuffer_cover() requires about 9 bytes of memory for each input byte. + * Tips: In general, a reasonable dictionary has a size of ~ 100 KB. + * It's possible to select smaller or larger size, just by specifying `dictBufferCapacity`. + * In general, it's recommended to provide a few thousands samples, though this can vary a lot. + * It's recommended that total size of all samples be about ~x100 times the target size of dictionary. + */ +ZDICTLIB_API size_t ZDICT_trainFromBuffer_cover( + void *dictBuffer, size_t dictBufferCapacity, + const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples, + ZDICT_cover_params_t parameters); + +/*! ZDICT_optimizeTrainFromBuffer_cover(): + * The same requirements as above hold for all the parameters except `parameters`. + * This function tries many parameter combinations and picks the best parameters. 
+ * `*parameters` is filled with the best parameters found, + * dictionary constructed with those parameters is stored in `dictBuffer`. + * + * All of the parameters d, k, steps are optional. + * If d is non-zero then we don't check multiple values of d, otherwise we check d = {6, 8, 10, 12, 14, 16}. + * if steps is zero it defaults to its default value. + * If k is non-zero then we don't check multiple values of k, otherwise we check steps values in [16, 2048]. + * + * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`) + * or an error code, which can be tested with ZDICT_isError(). + * On success `*parameters` contains the parameters selected. + * Note: ZDICT_optimizeTrainFromBuffer_cover() requires about 8 bytes of memory for each input byte and additionally another 5 bytes of memory for each byte of memory for each thread. + */ +ZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_cover( + void* dictBuffer, size_t dictBufferCapacity, + const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples, + ZDICT_cover_params_t* parameters); + +/*! ZDICT_finalizeDictionary(): + * Given a custom content as a basis for dictionary, and a set of samples, + * finalize dictionary by adding headers and statistics. + * + * Samples must be stored concatenated in a flat buffer `samplesBuffer`, + * supplied with an array of sizes `samplesSizes`, providing the size of each sample in order. + * + * dictContentSize must be >= ZDICT_CONTENTSIZE_MIN bytes. + * maxDictSize must be >= dictContentSize, and must be >= ZDICT_DICTSIZE_MIN bytes. + * + * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`), + * or an error code, which can be tested by ZDICT_isError(). + * Note: ZDICT_finalizeDictionary() will push notifications into stderr if instructed to, using notificationLevel>0. 
+ * Note 2: dictBuffer and dictContent can overlap + */ +#define ZDICT_CONTENTSIZE_MIN 128 +#define ZDICT_DICTSIZE_MIN 256 +ZDICTLIB_API size_t ZDICT_finalizeDictionary(void* dictBuffer, size_t dictBufferCapacity, + const void* dictContent, size_t dictContentSize, + const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples, + ZDICT_params_t parameters); + +typedef struct { + unsigned selectivityLevel; /* 0 means default; larger => select more => larger dictionary */ + ZDICT_params_t zParams; +} ZDICT_legacy_params_t; + +/*! ZDICT_trainFromBuffer_legacy(): + * Train a dictionary from an array of samples. + * Samples must be stored concatenated in a single flat buffer `samplesBuffer`, + * supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order. + * The resulting dictionary will be saved into `dictBuffer`. + * `parameters` is optional and can be provided with values set to 0 to mean "default". + * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`) + * or an error code, which can be tested with ZDICT_isError(). + * Tips: In general, a reasonable dictionary has a size of ~ 100 KB. + * It's possible to select smaller or larger size, just by specifying `dictBufferCapacity`. + * In general, it's recommended to provide a few thousands samples, though this can vary a lot. + * It's recommended that total size of all samples be about ~x100 times the target size of dictionary. + * Note: ZDICT_trainFromBuffer_legacy() will send notifications into stderr if instructed to, using notificationLevel>0. 
+ */ +ZDICTLIB_API size_t ZDICT_trainFromBuffer_legacy( + void *dictBuffer, size_t dictBufferCapacity, + const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples, + ZDICT_legacy_params_t parameters); + +/* Deprecation warnings */ +/* It is generally possible to disable deprecation warnings from compiler, + for example with -Wno-deprecated-declarations for gcc + or _CRT_SECURE_NO_WARNINGS in Visual. + Otherwise, it's also possible to manually define ZDICT_DISABLE_DEPRECATE_WARNINGS */ +#ifdef ZDICT_DISABLE_DEPRECATE_WARNINGS +# define ZDICT_DEPRECATED(message) ZDICTLIB_API /* disable deprecation warnings */ +#else +# define ZDICT_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__) +# if defined (__cplusplus) && (__cplusplus >= 201402) /* C++14 or greater */ +# define ZDICT_DEPRECATED(message) [[deprecated(message)]] ZDICTLIB_API +# elif (ZDICT_GCC_VERSION >= 405) || defined(__clang__) +# define ZDICT_DEPRECATED(message) ZDICTLIB_API __attribute__((deprecated(message))) +# elif (ZDICT_GCC_VERSION >= 301) +# define ZDICT_DEPRECATED(message) ZDICTLIB_API __attribute__((deprecated)) +# elif defined(_MSC_VER) +# define ZDICT_DEPRECATED(message) ZDICTLIB_API __declspec(deprecated(message)) +# else +# pragma message("WARNING: You need to implement ZDICT_DEPRECATED for this compiler") +# define ZDICT_DEPRECATED(message) ZDICTLIB_API +# endif +#endif /* ZDICT_DISABLE_DEPRECATE_WARNINGS */ + +ZDICT_DEPRECATED("use ZDICT_finalizeDictionary() instead") +size_t ZDICT_addEntropyTablesFromBuffer(void* dictBuffer, size_t dictContentSize, size_t dictBufferCapacity, + const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples); + + +#endif /* ZDICT_STATIC_LINKING_ONLY */ + +#if defined (__cplusplus) +} +#endif + +#endif /* DICTBUILDER_H_001 */ diff --git a/vendor/github.com/DataDog/zstd/zstd.go b/vendor/github.com/DataDog/zstd/zstd.go new file mode 100644 index 00000000000..e9953d6cc3a --- /dev/null +++ 
b/vendor/github.com/DataDog/zstd/zstd.go @@ -0,0 +1,144 @@ +package zstd + +/* +#define ZSTD_STATIC_LINKING_ONLY +#include "zstd.h" +#include "stdint.h" // for uintptr_t + +// The following *_wrapper function are used for removing superflouos +// memory allocations when calling the wrapped functions from Go code. +// See https://github.com/golang/go/issues/24450 for details. + +static size_t ZSTD_compress_wrapper(uintptr_t dst, size_t maxDstSize, const uintptr_t src, size_t srcSize, int compressionLevel) { + return ZSTD_compress((void*)dst, maxDstSize, (const void*)src, srcSize, compressionLevel); +} + +static size_t ZSTD_decompress_wrapper(uintptr_t dst, size_t maxDstSize, uintptr_t src, size_t srcSize) { + return ZSTD_decompress((void*)dst, maxDstSize, (const void *)src, srcSize); +} + +*/ +import "C" +import ( + "bytes" + "errors" + "io/ioutil" + "unsafe" +) + +// Defines best and standard values for zstd cli +const ( + BestSpeed = 1 + BestCompression = 20 + DefaultCompression = 5 +) + +var ( + // ErrEmptySlice is returned when there is nothing to compress + ErrEmptySlice = errors.New("Bytes slice is empty") +) + +// CompressBound returns the worst case size needed for a destination buffer, +// which can be used to preallocate a destination buffer or select a previously +// allocated buffer from a pool. +// See zstd.h to mirror implementation of ZSTD_COMPRESSBOUND +func CompressBound(srcSize int) int { + lowLimit := 128 << 10 // 128 kB + var margin int + if srcSize < lowLimit { + margin = (lowLimit - srcSize) >> 11 + } + return srcSize + (srcSize >> 8) + margin +} + +// cCompressBound is a cgo call to check the go implementation above against the c code. +func cCompressBound(srcSize int) int { + return int(C.ZSTD_compressBound(C.size_t(srcSize))) +} + +// Compress src into dst. If you have a buffer to use, you can pass it to +// prevent allocation. If it is too small, or if nil is passed, a new buffer +// will be allocated and returned. 
+func Compress(dst, src []byte) ([]byte, error) { + return CompressLevel(dst, src, DefaultCompression) +} + +// CompressLevel is the same as Compress but you can pass a compression level +func CompressLevel(dst, src []byte, level int) ([]byte, error) { + bound := CompressBound(len(src)) + if cap(dst) >= bound { + dst = dst[0:bound] // Reuse dst buffer + } else { + dst = make([]byte, bound) + } + + srcPtr := C.uintptr_t(uintptr(0)) // Do not point anywhere, if src is empty + if len(src) > 0 { + srcPtr = C.uintptr_t(uintptr(unsafe.Pointer(&src[0]))) + } + + cWritten := C.ZSTD_compress_wrapper( + C.uintptr_t(uintptr(unsafe.Pointer(&dst[0]))), + C.size_t(len(dst)), + srcPtr, + C.size_t(len(src)), + C.int(level)) + + written := int(cWritten) + // Check if the return is an Error code + if err := getError(written); err != nil { + return nil, err + } + return dst[:written], nil +} + +// Decompress src into dst. If you have a buffer to use, you can pass it to +// prevent allocation. If it is too small, or if nil is passed, a new buffer +// will be allocated and returned. 
+func Decompress(dst, src []byte) ([]byte, error) { + if len(src) == 0 { + return []byte{}, ErrEmptySlice + } + decompress := func(dst, src []byte) ([]byte, error) { + + cWritten := C.ZSTD_decompress_wrapper( + C.uintptr_t(uintptr(unsafe.Pointer(&dst[0]))), + C.size_t(len(dst)), + C.uintptr_t(uintptr(unsafe.Pointer(&src[0]))), + C.size_t(len(src))) + + written := int(cWritten) + // Check error + if err := getError(written); err != nil { + return nil, err + } + return dst[:written], nil + } + + if dst == nil { + // Attempt to use zStd to determine decompressed size (may result in error or 0) + size := int(C.size_t(C.ZSTD_getDecompressedSize(unsafe.Pointer(&src[0]), C.size_t(len(src))))) + + if err := getError(size); err != nil { + return nil, err + } + + if size > 0 { + dst = make([]byte, size) + } else { + dst = make([]byte, len(src)*3) // starting guess + } + } + for i := 0; i < 3; i++ { // 3 tries to allocate a bigger buffer + result, err := decompress(dst, src) + if !IsDstSizeTooSmallError(err) { + return result, err + } + dst = make([]byte, len(dst)*2) // Grow buffer by 2 + } + + // We failed getting a dst buffer of correct size, use stream API + r := NewReader(bytes.NewReader(src)) + defer r.Close() + return ioutil.ReadAll(r) +} diff --git a/vendor/github.com/DataDog/zstd/zstd.h b/vendor/github.com/DataDog/zstd/zstd.h new file mode 100644 index 00000000000..6405da602e8 --- /dev/null +++ b/vendor/github.com/DataDog/zstd/zstd.h @@ -0,0 +1,1399 @@ +/* + * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. 
+ */ +#if defined (__cplusplus) +extern "C" { +#endif + +#ifndef ZSTD_H_235446 +#define ZSTD_H_235446 + +/* ====== Dependency ======*/ +#include /* size_t */ + + +/* ===== ZSTDLIB_API : control library symbols visibility ===== */ +#ifndef ZSTDLIB_VISIBILITY +# if defined(__GNUC__) && (__GNUC__ >= 4) +# define ZSTDLIB_VISIBILITY __attribute__ ((visibility ("default"))) +# else +# define ZSTDLIB_VISIBILITY +# endif +#endif +#if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1) +# define ZSTDLIB_API __declspec(dllexport) ZSTDLIB_VISIBILITY +#elif defined(ZSTD_DLL_IMPORT) && (ZSTD_DLL_IMPORT==1) +# define ZSTDLIB_API __declspec(dllimport) ZSTDLIB_VISIBILITY /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/ +#else +# define ZSTDLIB_API ZSTDLIB_VISIBILITY +#endif + + +/******************************************************************************************************* + Introduction + + zstd, short for Zstandard, is a fast lossless compression algorithm, + targeting real-time compression scenarios at zlib-level and better compression ratios. + The zstd compression library provides in-memory compression and decompression functions. + The library supports compression levels from 1 up to ZSTD_maxCLevel() which is currently 22. + Levels >= 20, labeled `--ultra`, should be used with caution, as they require more memory. + Compression can be done in: + - a single step (described as Simple API) + - a single step, reusing a context (described as Explicit context) + - unbounded multiple steps (described as Streaming compression) + The compression ratio achievable on small data can be highly improved using a dictionary in: + - a single step (described as Simple dictionary API) + - a single step, reusing a dictionary (described as Bulk-processing dictionary API) + + Advanced experimental functions can be accessed using #define ZSTD_STATIC_LINKING_ONLY before including zstd.h. 
+ Advanced experimental APIs shall never be used with a dynamic library. + They are not "stable", their definition may change in the future. Only static linking is allowed. +*********************************************************************************************************/ + +/*------ Version ------*/ +#define ZSTD_VERSION_MAJOR 1 +#define ZSTD_VERSION_MINOR 3 +#define ZSTD_VERSION_RELEASE 4 + +#define ZSTD_VERSION_NUMBER (ZSTD_VERSION_MAJOR *100*100 + ZSTD_VERSION_MINOR *100 + ZSTD_VERSION_RELEASE) +ZSTDLIB_API unsigned ZSTD_versionNumber(void); /**< useful to check dll version */ + +#define ZSTD_LIB_VERSION ZSTD_VERSION_MAJOR.ZSTD_VERSION_MINOR.ZSTD_VERSION_RELEASE +#define ZSTD_QUOTE(str) #str +#define ZSTD_EXPAND_AND_QUOTE(str) ZSTD_QUOTE(str) +#define ZSTD_VERSION_STRING ZSTD_EXPAND_AND_QUOTE(ZSTD_LIB_VERSION) +ZSTDLIB_API const char* ZSTD_versionString(void); /* added in v1.3.0 */ + + +/*************************************** +* Simple API +***************************************/ +/*! ZSTD_compress() : + * Compresses `src` content as a single zstd compressed frame into already allocated `dst`. + * Hint : compression runs faster if `dstCapacity` >= `ZSTD_compressBound(srcSize)`. + * @return : compressed size written into `dst` (<= `dstCapacity), + * or an error code if it fails (which can be tested using ZSTD_isError()). */ +ZSTDLIB_API size_t ZSTD_compress( void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + int compressionLevel); + +/*! ZSTD_decompress() : + * `compressedSize` : must be the _exact_ size of some number of compressed and/or skippable frames. + * `dstCapacity` is an upper bound of originalSize to regenerate. + * If user cannot imply a maximum upper bound, it's better to use streaming mode to decompress data. + * @return : the number of bytes decompressed into `dst` (<= `dstCapacity`), + * or an errorCode if it fails (which can be tested using ZSTD_isError()). 
*/ +ZSTDLIB_API size_t ZSTD_decompress( void* dst, size_t dstCapacity, + const void* src, size_t compressedSize); + +/*! ZSTD_getFrameContentSize() : added in v1.3.0 + * `src` should point to the start of a ZSTD encoded frame. + * `srcSize` must be at least as large as the frame header. + * hint : any size >= `ZSTD_frameHeaderSize_max` is large enough. + * @return : - decompressed size of the frame in `src`, if known + * - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined + * - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small) + * note 1 : a 0 return value means the frame is valid but "empty". + * note 2 : decompressed size is an optional field, it may not be present, typically in streaming mode. + * When `return==ZSTD_CONTENTSIZE_UNKNOWN`, data to decompress could be any size. + * In which case, it's necessary to use streaming mode to decompress data. + * Optionally, application can rely on some implicit limit, + * as ZSTD_decompress() only needs an upper bound of decompressed size. + * (For example, data could be necessarily cut into blocks <= 16 KB). + * note 3 : decompressed size is always present when compression is done with ZSTD_compress() + * note 4 : decompressed size can be very large (64-bits value), + * potentially larger than what local system can handle as a single memory segment. + * In which case, it's necessary to use streaming mode to decompress data. + * note 5 : If source is untrusted, decompressed size could be wrong or intentionally modified. + * Always ensure return value fits within application's authorized limits. + * Each application can set its own limits. + * note 6 : This function replaces ZSTD_getDecompressedSize() */ +#define ZSTD_CONTENTSIZE_UNKNOWN (0ULL - 1) +#define ZSTD_CONTENTSIZE_ERROR (0ULL - 2) +ZSTDLIB_API unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize); + +/*! 
ZSTD_getDecompressedSize() : + * NOTE: This function is now obsolete, in favor of ZSTD_getFrameContentSize(). + * Both functions work the same way, but ZSTD_getDecompressedSize() blends + * "empty", "unknown" and "error" results to the same return value (0), + * while ZSTD_getFrameContentSize() gives them separate return values. + * `src` is the start of a zstd compressed frame. + * @return : content size to be decompressed, as a 64-bits value _if known and not empty_, 0 otherwise. */ +ZSTDLIB_API unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize); + + +/*====== Helper functions ======*/ +#define ZSTD_COMPRESSBOUND(srcSize) ((srcSize) + ((srcSize)>>8) + (((srcSize) < (128<<10)) ? (((128<<10) - (srcSize)) >> 11) /* margin, from 64 to 0 */ : 0)) /* this formula ensures that bound(A) + bound(B) <= bound(A+B) as long as A and B >= 128 KB */ +ZSTDLIB_API size_t ZSTD_compressBound(size_t srcSize); /*!< maximum compressed size in worst case single-pass scenario */ +ZSTDLIB_API unsigned ZSTD_isError(size_t code); /*!< tells if a `size_t` function result is an error code */ +ZSTDLIB_API const char* ZSTD_getErrorName(size_t code); /*!< provides readable string from an error code */ +ZSTDLIB_API int ZSTD_maxCLevel(void); /*!< maximum compression level available */ + + +/*************************************** +* Explicit context +***************************************/ +/*= Compression context + * When compressing many times, + * it is recommended to allocate a context just once, and re-use it for each successive compression operation. + * This will make workload friendlier for system's memory. + * Use one context per thread for parallel execution in multi-threaded environments. */ +typedef struct ZSTD_CCtx_s ZSTD_CCtx; +ZSTDLIB_API ZSTD_CCtx* ZSTD_createCCtx(void); +ZSTDLIB_API size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx); + +/*! ZSTD_compressCCtx() : + * Same as ZSTD_compress(), requires an allocated ZSTD_CCtx (see ZSTD_createCCtx()). 
*/ +ZSTDLIB_API size_t ZSTD_compressCCtx(ZSTD_CCtx* ctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + int compressionLevel); + +/*= Decompression context + * When decompressing many times, + * it is recommended to allocate a context only once, + * and re-use it for each successive compression operation. + * This will make workload friendlier for system's memory. + * Use one context per thread for parallel execution. */ +typedef struct ZSTD_DCtx_s ZSTD_DCtx; +ZSTDLIB_API ZSTD_DCtx* ZSTD_createDCtx(void); +ZSTDLIB_API size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx); + +/*! ZSTD_decompressDCtx() : + * Same as ZSTD_decompress(), requires an allocated ZSTD_DCtx (see ZSTD_createDCtx()) */ +ZSTDLIB_API size_t ZSTD_decompressDCtx(ZSTD_DCtx* ctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize); + + +/************************** +* Simple dictionary API +***************************/ +/*! ZSTD_compress_usingDict() : + * Compression using a predefined Dictionary (see dictBuilder/zdict.h). + * Note : This function loads the dictionary, resulting in significant startup delay. + * Note : When `dict == NULL || dictSize < 8` no dictionary is used. */ +ZSTDLIB_API size_t ZSTD_compress_usingDict(ZSTD_CCtx* ctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + const void* dict,size_t dictSize, + int compressionLevel); + +/*! ZSTD_decompress_usingDict() : + * Decompression using a predefined Dictionary (see dictBuilder/zdict.h). + * Dictionary must be identical to the one used during compression. + * Note : This function loads the dictionary, resulting in significant startup delay. + * Note : When `dict == NULL || dictSize < 8` no dictionary is used. 
*/ +ZSTDLIB_API size_t ZSTD_decompress_usingDict(ZSTD_DCtx* dctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + const void* dict,size_t dictSize); + + +/********************************** + * Bulk processing dictionary API + *********************************/ +typedef struct ZSTD_CDict_s ZSTD_CDict; + +/*! ZSTD_createCDict() : + * When compressing multiple messages / blocks with the same dictionary, it's recommended to load it just once. + * ZSTD_createCDict() will create a digested dictionary, ready to start future compression operations without startup delay. + * ZSTD_CDict can be created once and shared by multiple threads concurrently, since its usage is read-only. + * `dictBuffer` can be released after ZSTD_CDict creation, since its content is copied within CDict */ +ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict(const void* dictBuffer, size_t dictSize, + int compressionLevel); + +/*! ZSTD_freeCDict() : + * Function frees memory allocated by ZSTD_createCDict(). */ +ZSTDLIB_API size_t ZSTD_freeCDict(ZSTD_CDict* CDict); + +/*! ZSTD_compress_usingCDict() : + * Compression using a digested Dictionary. + * Faster startup than ZSTD_compress_usingDict(), recommended when same dictionary is used multiple times. + * Note that compression level is decided during dictionary creation. + * Frame parameters are hardcoded (dictID=yes, contentSize=yes, checksum=no) */ +ZSTDLIB_API size_t ZSTD_compress_usingCDict(ZSTD_CCtx* cctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + const ZSTD_CDict* cdict); + + +typedef struct ZSTD_DDict_s ZSTD_DDict; + +/*! ZSTD_createDDict() : + * Create a digested dictionary, ready to start decompression operation without startup delay. + * dictBuffer can be released after DDict creation, as its content is copied inside DDict */ +ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict(const void* dictBuffer, size_t dictSize); + +/*! 
ZSTD_freeDDict() : + * Function frees memory allocated with ZSTD_createDDict() */ +ZSTDLIB_API size_t ZSTD_freeDDict(ZSTD_DDict* ddict); + +/*! ZSTD_decompress_usingDDict() : + * Decompression using a digested Dictionary. + * Faster startup than ZSTD_decompress_usingDict(), recommended when same dictionary is used multiple times. */ +ZSTDLIB_API size_t ZSTD_decompress_usingDDict(ZSTD_DCtx* dctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + const ZSTD_DDict* ddict); + + +/**************************** +* Streaming +****************************/ + +typedef struct ZSTD_inBuffer_s { + const void* src; /**< start of input buffer */ + size_t size; /**< size of input buffer */ + size_t pos; /**< position where reading stopped. Will be updated. Necessarily 0 <= pos <= size */ +} ZSTD_inBuffer; + +typedef struct ZSTD_outBuffer_s { + void* dst; /**< start of output buffer */ + size_t size; /**< size of output buffer */ + size_t pos; /**< position where writing stopped. Will be updated. Necessarily 0 <= pos <= size */ +} ZSTD_outBuffer; + + + +/*-*********************************************************************** +* Streaming compression - HowTo +* +* A ZSTD_CStream object is required to track streaming operation. +* Use ZSTD_createCStream() and ZSTD_freeCStream() to create/release resources. +* ZSTD_CStream objects can be reused multiple times on consecutive compression operations. +* It is recommended to re-use ZSTD_CStream in situations where many streaming operations will be achieved consecutively, +* since it will play nicer with system's memory, by re-using already allocated memory. +* Use one separate ZSTD_CStream per thread for parallel execution. +* +* Start a new compression by initializing ZSTD_CStream. +* Use ZSTD_initCStream() to start a new compression operation. 
+* Use ZSTD_initCStream_usingDict() or ZSTD_initCStream_usingCDict() for a compression which requires a dictionary (experimental section) +* +* Use ZSTD_compressStream() repetitively to consume input stream. +* The function will automatically update both `pos` fields. +* Note that it may not consume the entire input, in which case `pos < size`, +* and it's up to the caller to present again remaining data. +* @return : a size hint, preferred nb of bytes to use as input for next function call +* or an error code, which can be tested using ZSTD_isError(). +* Note 1 : it's just a hint, to help latency a little, any other value will work fine. +* Note 2 : size hint is guaranteed to be <= ZSTD_CStreamInSize() +* +* At any moment, it's possible to flush whatever data remains within internal buffer, using ZSTD_flushStream(). +* `output->pos` will be updated. +* Note that some content might still be left within internal buffer if `output->size` is too small. +* @return : nb of bytes still present within internal buffer (0 if it's empty) +* or an error code, which can be tested using ZSTD_isError(). +* +* ZSTD_endStream() instructs to finish a frame. +* It will perform a flush and write frame epilogue. +* The epilogue is required for decoders to consider a frame completed. +* ZSTD_endStream() may not be able to flush full data if `output->size` is too small. +* In which case, call again ZSTD_endStream() to complete the flush. +* @return : 0 if frame fully completed and fully flushed, + or >0 if some data is still present within internal buffer + (value is minimum size estimation for remaining data to flush, but it could be more) +* or an error code, which can be tested using ZSTD_isError(). 
+* +* *******************************************************************/ + +typedef ZSTD_CCtx ZSTD_CStream; /**< CCtx and CStream are now effectively same object (>= v1.3.0) */ + /* Continue to distinguish them for compatibility with versions <= v1.2.0 */ +/*===== ZSTD_CStream management functions =====*/ +ZSTDLIB_API ZSTD_CStream* ZSTD_createCStream(void); +ZSTDLIB_API size_t ZSTD_freeCStream(ZSTD_CStream* zcs); + +/*===== Streaming compression functions =====*/ +ZSTDLIB_API size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel); +ZSTDLIB_API size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input); +ZSTDLIB_API size_t ZSTD_flushStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output); +ZSTDLIB_API size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output); + +ZSTDLIB_API size_t ZSTD_CStreamInSize(void); /**< recommended size for input buffer */ +ZSTDLIB_API size_t ZSTD_CStreamOutSize(void); /**< recommended size for output buffer. Guarantee to successfully flush at least one complete compressed block in all circumstances. */ + + + +/*-*************************************************************************** +* Streaming decompression - HowTo +* +* A ZSTD_DStream object is required to track streaming operations. +* Use ZSTD_createDStream() and ZSTD_freeDStream() to create/release resources. +* ZSTD_DStream objects can be re-used multiple times. +* +* Use ZSTD_initDStream() to start a new decompression operation, +* or ZSTD_initDStream_usingDict() if decompression requires a dictionary. +* @return : recommended first input size +* +* Use ZSTD_decompressStream() repetitively to consume your input. +* The function will update both `pos` fields. +* If `input.pos < input.size`, some input has not been consumed. +* It's up to the caller to present again remaining data. +* If `output.pos < output.size`, decoder has flushed everything it could. 
+* @return : 0 when a frame is completely decoded and fully flushed, +* an error code, which can be tested using ZSTD_isError(), +* any other value > 0, which means there is still some decoding to do to complete current frame. +* The return value is a suggested next input size (a hint to improve latency) that will never load more than the current frame. +* *******************************************************************************/ + +typedef ZSTD_DCtx ZSTD_DStream; /**< DCtx and DStream are now effectively same object (>= v1.3.0) */ + /* For compatibility with versions <= v1.2.0, continue to consider them separated. */ +/*===== ZSTD_DStream management functions =====*/ +ZSTDLIB_API ZSTD_DStream* ZSTD_createDStream(void); +ZSTDLIB_API size_t ZSTD_freeDStream(ZSTD_DStream* zds); + +/*===== Streaming decompression functions =====*/ +ZSTDLIB_API size_t ZSTD_initDStream(ZSTD_DStream* zds); +ZSTDLIB_API size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inBuffer* input); + +ZSTDLIB_API size_t ZSTD_DStreamInSize(void); /*!< recommended size for input buffer */ +ZSTDLIB_API size_t ZSTD_DStreamOutSize(void); /*!< recommended size for output buffer. Guarantee to successfully flush at least one complete block in all circumstances. */ + +#endif /* ZSTD_H_235446 */ + + + +/**************************************************************************************** + * START OF ADVANCED AND EXPERIMENTAL FUNCTIONS + * The definitions in this section are considered experimental. + * They should never be used with a dynamic library, as prototypes may change in the future. + * They are provided for advanced scenarios. + * Use them only in association with static linking. 
+ * ***************************************************************************************/ + +#if defined(ZSTD_STATIC_LINKING_ONLY) && !defined(ZSTD_H_ZSTD_STATIC_LINKING_ONLY) +#define ZSTD_H_ZSTD_STATIC_LINKING_ONLY + +/* --- Constants ---*/ +#define ZSTD_MAGICNUMBER 0xFD2FB528 /* >= v0.8.0 */ +#define ZSTD_MAGIC_SKIPPABLE_START 0x184D2A50U +#define ZSTD_MAGIC_DICTIONARY 0xEC30A437 /* >= v0.7.0 */ + +#define ZSTD_WINDOWLOG_MAX_32 30 +#define ZSTD_WINDOWLOG_MAX_64 31 +#define ZSTD_WINDOWLOG_MAX ((unsigned)(sizeof(size_t) == 4 ? ZSTD_WINDOWLOG_MAX_32 : ZSTD_WINDOWLOG_MAX_64)) +#define ZSTD_WINDOWLOG_MIN 10 +#define ZSTD_HASHLOG_MAX ((ZSTD_WINDOWLOG_MAX < 30) ? ZSTD_WINDOWLOG_MAX : 30) +#define ZSTD_HASHLOG_MIN 6 +#define ZSTD_CHAINLOG_MAX_32 29 +#define ZSTD_CHAINLOG_MAX_64 30 +#define ZSTD_CHAINLOG_MAX ((unsigned)(sizeof(size_t) == 4 ? ZSTD_CHAINLOG_MAX_32 : ZSTD_CHAINLOG_MAX_64)) +#define ZSTD_CHAINLOG_MIN ZSTD_HASHLOG_MIN +#define ZSTD_HASHLOG3_MAX 17 +#define ZSTD_SEARCHLOG_MAX (ZSTD_WINDOWLOG_MAX-1) +#define ZSTD_SEARCHLOG_MIN 1 +#define ZSTD_SEARCHLENGTH_MAX 7 /* only for ZSTD_fast, other strategies are limited to 6 */ +#define ZSTD_SEARCHLENGTH_MIN 3 /* only for ZSTD_btopt, other strategies are limited to 4 */ +#define ZSTD_TARGETLENGTH_MIN 1 /* only used by btopt, btultra and btfast */ +#define ZSTD_LDM_MINMATCH_MIN 4 +#define ZSTD_LDM_MINMATCH_MAX 4096 +#define ZSTD_LDM_BUCKETSIZELOG_MAX 8 + +#define ZSTD_FRAMEHEADERSIZE_PREFIX 5 /* minimum input size to know frame header size */ +#define ZSTD_FRAMEHEADERSIZE_MIN 6 +#define ZSTD_FRAMEHEADERSIZE_MAX 18 /* for static allocation */ +static const size_t ZSTD_frameHeaderSize_prefix = ZSTD_FRAMEHEADERSIZE_PREFIX; +static const size_t ZSTD_frameHeaderSize_min = ZSTD_FRAMEHEADERSIZE_MIN; +static const size_t ZSTD_frameHeaderSize_max = ZSTD_FRAMEHEADERSIZE_MAX; +static const size_t ZSTD_skippableHeaderSize = 8; /* magic number + skippable frame length */ + + +/*--- Advanced types ---*/ +typedef enum { 
ZSTD_fast=1, ZSTD_dfast, ZSTD_greedy, ZSTD_lazy, ZSTD_lazy2, + ZSTD_btlazy2, ZSTD_btopt, ZSTD_btultra } ZSTD_strategy; /* from faster to stronger */ + +typedef struct { + unsigned windowLog; /**< largest match distance : larger == more compression, more memory needed during decompression */ + unsigned chainLog; /**< fully searched segment : larger == more compression, slower, more memory (useless for fast) */ + unsigned hashLog; /**< dispatch table : larger == faster, more memory */ + unsigned searchLog; /**< nb of searches : larger == more compression, slower */ + unsigned searchLength; /**< match length searched : larger == faster decompression, sometimes less compression */ + unsigned targetLength; /**< acceptable match size for optimal parser (only) : larger == more compression, slower */ + ZSTD_strategy strategy; +} ZSTD_compressionParameters; + +typedef struct { + unsigned contentSizeFlag; /**< 1: content size will be in frame header (when known) */ + unsigned checksumFlag; /**< 1: generate a 32-bits checksum at end of frame, for error detection */ + unsigned noDictIDFlag; /**< 1: no dictID will be saved into frame header (if dictionary compression) */ +} ZSTD_frameParameters; + +typedef struct { + ZSTD_compressionParameters cParams; + ZSTD_frameParameters fParams; +} ZSTD_parameters; + +typedef struct ZSTD_CCtx_params_s ZSTD_CCtx_params; + +typedef enum { + ZSTD_dct_auto=0, /* dictionary is "full" when starting with ZSTD_MAGIC_DICTIONARY, otherwise it is "rawContent" */ + ZSTD_dct_rawContent, /* ensures dictionary is always loaded as rawContent, even if it starts with ZSTD_MAGIC_DICTIONARY */ + ZSTD_dct_fullDict /* refuses to load a dictionary if it does not respect Zstandard's specification */ +} ZSTD_dictContentType_e; + +typedef enum { + ZSTD_dlm_byCopy = 0, /**< Copy dictionary content internally */ + ZSTD_dlm_byRef, /**< Reference dictionary content -- the dictionary buffer must outlive its users. 
*/ +} ZSTD_dictLoadMethod_e; + + + +/*************************************** +* Frame size functions +***************************************/ + +/*! ZSTD_findFrameCompressedSize() : + * `src` should point to the start of a ZSTD encoded frame or skippable frame + * `srcSize` must be >= first frame size + * @return : the compressed size of the first frame starting at `src`, + * suitable to pass to `ZSTD_decompress` or similar, + * or an error code if input is invalid */ +ZSTDLIB_API size_t ZSTD_findFrameCompressedSize(const void* src, size_t srcSize); + +/*! ZSTD_findDecompressedSize() : + * `src` should point the start of a series of ZSTD encoded and/or skippable frames + * `srcSize` must be the _exact_ size of this series + * (i.e. there should be a frame boundary exactly at `srcSize` bytes after `src`) + * @return : - decompressed size of all data in all successive frames + * - if the decompressed size cannot be determined: ZSTD_CONTENTSIZE_UNKNOWN + * - if an error occurred: ZSTD_CONTENTSIZE_ERROR + * + * note 1 : decompressed size is an optional field, that may not be present, especially in streaming mode. + * When `return==ZSTD_CONTENTSIZE_UNKNOWN`, data to decompress could be any size. + * In which case, it's necessary to use streaming mode to decompress data. + * note 2 : decompressed size is always present when compression is done with ZSTD_compress() + * note 3 : decompressed size can be very large (64-bits value), + * potentially larger than what local system can handle as a single memory segment. + * In which case, it's necessary to use streaming mode to decompress data. + * note 4 : If source is untrusted, decompressed size could be wrong or intentionally modified. + * Always ensure result fits within application's authorized limits. + * Each application can set its own limits. + * note 5 : ZSTD_findDecompressedSize handles multiple frames, and so it must traverse the input to + * read each contained frame header. 
This is fast as most of the data is skipped, + * however it does mean that all frame data must be present and valid. */ +ZSTDLIB_API unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize); + +/*! ZSTD_frameHeaderSize() : +* `src` should point to the start of a ZSTD frame +* `srcSize` must be >= ZSTD_frameHeaderSize_prefix. +* @return : size of the Frame Header */ +ZSTDLIB_API size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize); + + +/*************************************** +* Memory management +***************************************/ + +/*! ZSTD_sizeof_*() : + * These functions give the current memory usage of selected object. + * Object memory usage can evolve when re-used. */ +ZSTDLIB_API size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx); +ZSTDLIB_API size_t ZSTD_sizeof_DCtx(const ZSTD_DCtx* dctx); +ZSTDLIB_API size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs); +ZSTDLIB_API size_t ZSTD_sizeof_DStream(const ZSTD_DStream* zds); +ZSTDLIB_API size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict); +ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict); + +/*! ZSTD_estimate*() : + * These functions make it possible to estimate memory usage + * of a future {D,C}Ctx, before its creation. + * ZSTD_estimateCCtxSize() will provide a budget large enough for any compression level up to selected one. + * It will also consider src size to be arbitrarily "large", which is worst case. + * If srcSize is known to always be small, ZSTD_estimateCCtxSize_usingCParams() can provide a tighter estimation. + * ZSTD_estimateCCtxSize_usingCParams() can be used in tandem with ZSTD_getCParams() to create cParams from compressionLevel. + * ZSTD_estimateCCtxSize_usingCCtxParams() can be used in tandem with ZSTD_CCtxParam_setParameter(). Only single-threaded compression is supported. This function will return an error code if ZSTD_p_nbWorkers is >= 1. + * Note : CCtx size estimation is only correct for single-threaded compression. 
*/ +ZSTDLIB_API size_t ZSTD_estimateCCtxSize(int compressionLevel); +ZSTDLIB_API size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams); +ZSTDLIB_API size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params); +ZSTDLIB_API size_t ZSTD_estimateDCtxSize(void); + +/*! ZSTD_estimateCStreamSize() : + * ZSTD_estimateCStreamSize() will provide a budget large enough for any compression level up to selected one. + * It will also consider src size to be arbitrarily "large", which is worst case. + * If srcSize is known to always be small, ZSTD_estimateCStreamSize_usingCParams() can provide a tighter estimation. + * ZSTD_estimateCStreamSize_usingCParams() can be used in tandem with ZSTD_getCParams() to create cParams from compressionLevel. + * ZSTD_estimateCStreamSize_usingCCtxParams() can be used in tandem with ZSTD_CCtxParam_setParameter(). Only single-threaded compression is supported. This function will return an error code if ZSTD_p_nbWorkers is >= 1. + * Note : CStream size estimation is only correct for single-threaded compression. + * ZSTD_DStream memory budget depends on window Size. + * This information can be passed manually, using ZSTD_estimateDStreamSize, + * or deducted from a valid frame Header, using ZSTD_estimateDStreamSize_fromFrame(); + * Note : if streaming is init with function ZSTD_init?Stream_usingDict(), + * an internal ?Dict will be created, which additional size is not estimated here. + * In this case, get total size by adding ZSTD_estimate?DictSize */ +ZSTDLIB_API size_t ZSTD_estimateCStreamSize(int compressionLevel); +ZSTDLIB_API size_t ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParameters cParams); +ZSTDLIB_API size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params); +ZSTDLIB_API size_t ZSTD_estimateDStreamSize(size_t windowSize); +ZSTDLIB_API size_t ZSTD_estimateDStreamSize_fromFrame(const void* src, size_t srcSize); + +/*! 
ZSTD_estimate?DictSize() : + * ZSTD_estimateCDictSize() will bet that src size is relatively "small", and content is copied, like ZSTD_createCDict(). + * ZSTD_estimateCDictSize_advanced() makes it possible to control compression parameters precisely, like ZSTD_createCDict_advanced(). + * Note : dictionaries created by reference (`ZSTD_dlm_byRef`) are logically smaller. + */ +ZSTDLIB_API size_t ZSTD_estimateCDictSize(size_t dictSize, int compressionLevel); +ZSTDLIB_API size_t ZSTD_estimateCDictSize_advanced(size_t dictSize, ZSTD_compressionParameters cParams, ZSTD_dictLoadMethod_e dictLoadMethod); +ZSTDLIB_API size_t ZSTD_estimateDDictSize(size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod); + +/*! ZSTD_initStatic*() : + * Initialize an object using a pre-allocated fixed-size buffer. + * workspace: The memory area to emplace the object into. + * Provided pointer *must be 8-bytes aligned*. + * Buffer must outlive object. + * workspaceSize: Use ZSTD_estimate*Size() to determine + * how large workspace must be to support target scenario. + * @return : pointer to object (same address as workspace, just different type), + * or NULL if error (size too small, incorrect alignment, etc.) + * Note : zstd will never resize nor malloc() when using a static buffer. + * If the object requires more memory than available, + * zstd will just error out (typically ZSTD_error_memory_allocation). + * Note 2 : there is no corresponding "free" function. + * Since workspace is allocated externally, it must be freed externally too. + * Note 3 : cParams : use ZSTD_getCParams() to convert a compression level + * into its associated cParams. + * Limitation 1 : currently not compatible with internal dictionary creation, triggered by + * ZSTD_CCtx_loadDictionary(), ZSTD_initCStream_usingDict() or ZSTD_initDStream_usingDict(). + * Limitation 2 : static cctx currently not compatible with multi-threading. + * Limitation 3 : static dctx is incompatible with legacy support. 
+ */ +ZSTDLIB_API ZSTD_CCtx* ZSTD_initStaticCCtx(void* workspace, size_t workspaceSize); +ZSTDLIB_API ZSTD_CStream* ZSTD_initStaticCStream(void* workspace, size_t workspaceSize); /**< same as ZSTD_initStaticCCtx() */ + +ZSTDLIB_API ZSTD_DCtx* ZSTD_initStaticDCtx(void* workspace, size_t workspaceSize); +ZSTDLIB_API ZSTD_DStream* ZSTD_initStaticDStream(void* workspace, size_t workspaceSize); /**< same as ZSTD_initStaticDCtx() */ + +ZSTDLIB_API const ZSTD_CDict* ZSTD_initStaticCDict( + void* workspace, size_t workspaceSize, + const void* dict, size_t dictSize, + ZSTD_dictLoadMethod_e dictLoadMethod, + ZSTD_dictContentType_e dictContentType, + ZSTD_compressionParameters cParams); + +ZSTDLIB_API const ZSTD_DDict* ZSTD_initStaticDDict( + void* workspace, size_t workspaceSize, + const void* dict, size_t dictSize, + ZSTD_dictLoadMethod_e dictLoadMethod, + ZSTD_dictContentType_e dictContentType); + +/*! Custom memory allocation : + * These prototypes make it possible to pass your own allocation/free functions. + * ZSTD_customMem is provided at creation time, using ZSTD_create*_advanced() variants listed below. + * All allocation/free operations will be completed using these custom variants instead of regular ones. 
+ */ +typedef void* (*ZSTD_allocFunction) (void* opaque, size_t size); +typedef void (*ZSTD_freeFunction) (void* opaque, void* address); +typedef struct { ZSTD_allocFunction customAlloc; ZSTD_freeFunction customFree; void* opaque; } ZSTD_customMem; +static ZSTD_customMem const ZSTD_defaultCMem = { NULL, NULL, NULL }; /**< this constant defers to stdlib's functions */ + +ZSTDLIB_API ZSTD_CCtx* ZSTD_createCCtx_advanced(ZSTD_customMem customMem); +ZSTDLIB_API ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem); +ZSTDLIB_API ZSTD_DCtx* ZSTD_createDCtx_advanced(ZSTD_customMem customMem); +ZSTDLIB_API ZSTD_DStream* ZSTD_createDStream_advanced(ZSTD_customMem customMem); + +ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_advanced(const void* dict, size_t dictSize, + ZSTD_dictLoadMethod_e dictLoadMethod, + ZSTD_dictContentType_e dictContentType, + ZSTD_compressionParameters cParams, + ZSTD_customMem customMem); + +ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict_advanced(const void* dict, size_t dictSize, + ZSTD_dictLoadMethod_e dictLoadMethod, + ZSTD_dictContentType_e dictContentType, + ZSTD_customMem customMem); + + + +/*************************************** +* Advanced compression functions +***************************************/ + +/*! ZSTD_createCDict_byReference() : + * Create a digested dictionary for compression + * Dictionary content is simply referenced, and therefore stays in dictBuffer. + * It is important that dictBuffer outlives CDict, it must remain read accessible throughout the lifetime of CDict */ +ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_byReference(const void* dictBuffer, size_t dictSize, int compressionLevel); + +/*! ZSTD_getCParams() : +* @return ZSTD_compressionParameters structure for a selected compression level and estimated srcSize. +* `estimatedSrcSize` value is optional, select 0 if not known */ +ZSTDLIB_API ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long estimatedSrcSize, size_t dictSize); + +/*! 
ZSTD_getParams() : +* same as ZSTD_getCParams(), but @return a full `ZSTD_parameters` object instead of sub-component `ZSTD_compressionParameters`. +* All fields of `ZSTD_frameParameters` are set to default : contentSize=1, checksum=0, noDictID=0 */ +ZSTDLIB_API ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long estimatedSrcSize, size_t dictSize); + +/*! ZSTD_checkCParams() : +* Ensure param values remain within authorized range */ +ZSTDLIB_API size_t ZSTD_checkCParams(ZSTD_compressionParameters params); + +/*! ZSTD_adjustCParams() : + * optimize params for a given `srcSize` and `dictSize`. + * both values are optional, select `0` if unknown. */ +ZSTDLIB_API ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize); + +/*! ZSTD_compress_advanced() : +* Same as ZSTD_compress_usingDict(), with fine-tune control over each compression parameter */ +ZSTDLIB_API size_t ZSTD_compress_advanced (ZSTD_CCtx* cctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + const void* dict,size_t dictSize, + ZSTD_parameters params); + +/*! ZSTD_compress_usingCDict_advanced() : +* Same as ZSTD_compress_usingCDict(), with fine-tune control over frame parameters */ +ZSTDLIB_API size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + const ZSTD_CDict* cdict, ZSTD_frameParameters fParams); + + +/*--- Advanced decompression functions ---*/ + +/*! ZSTD_isFrame() : + * Tells if the content of `buffer` starts with a valid Frame Identifier. + * Note : Frame Identifier is 4 bytes. If `size < 4`, @return will always be 0. + * Note 2 : Legacy Frame Identifiers are considered valid only if Legacy Support is enabled. + * Note 3 : Skippable Frame Identifiers are considered valid. */ +ZSTDLIB_API unsigned ZSTD_isFrame(const void* buffer, size_t size); + +/*! 
ZSTD_createDDict_byReference() : + * Create a digested dictionary, ready to start decompression operation without startup delay. + * Dictionary content is referenced, and therefore stays in dictBuffer. + * It is important that dictBuffer outlives DDict, + * it must remain read accessible throughout the lifetime of DDict */ +ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict_byReference(const void* dictBuffer, size_t dictSize); + + +/*! ZSTD_getDictID_fromDict() : + * Provides the dictID stored within dictionary. + * if @return == 0, the dictionary is not conformant with Zstandard specification. + * It can still be loaded, but as a content-only dictionary. */ +ZSTDLIB_API unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize); + +/*! ZSTD_getDictID_fromDDict() : + * Provides the dictID of the dictionary loaded into `ddict`. + * If @return == 0, the dictionary is not conformant to Zstandard specification, or empty. + * Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */ +ZSTDLIB_API unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict); + +/*! ZSTD_getDictID_fromFrame() : + * Provides the dictID required to decompress the frame stored within `src`. + * If @return == 0, the dictID could not be decoded. + * This could be for one of the following reasons : + * - The frame does not require a dictionary to be decoded (most common case). + * - The frame was built with dictID intentionally removed. Whatever dictionary is necessary is a hidden information. + * Note : this use case also happens when using a non-conformant dictionary. + * - `srcSize` is too small, and as a result, the frame header could not be decoded (only possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`). + * - This is not a Zstandard frame. + * When identifying the exact failure cause, it's possible to use ZSTD_getFrameHeader(), which will provide a more precise error code. 
*/ +ZSTDLIB_API unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize); + + +/******************************************************************** +* Advanced streaming functions +********************************************************************/ + +/*===== Advanced Streaming compression functions =====*/ +ZSTDLIB_API size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pledgedSrcSize); /**< pledgedSrcSize must be correct. If it is not known at init time, use ZSTD_CONTENTSIZE_UNKNOWN. Note that, for compatibility with older programs, "0" also disables frame content size field. It may be enabled in the future. */ +ZSTDLIB_API size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel); /**< creates of an internal CDict (incompatible with static CCtx), except if dict == NULL or dictSize < 8, in which case no dict is used. Note: dict is loaded with ZSTD_dm_auto (treated as a full zstd dictionary if it begins with ZSTD_MAGIC_DICTIONARY, else as raw content) and ZSTD_dlm_byCopy.*/ +ZSTDLIB_API size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs, const void* dict, size_t dictSize, + ZSTD_parameters params, unsigned long long pledgedSrcSize); /**< pledgedSrcSize must be correct. If srcSize is not known at init time, use value ZSTD_CONTENTSIZE_UNKNOWN. dict is loaded with ZSTD_dm_auto and ZSTD_dlm_byCopy. */ +ZSTDLIB_API size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict); /**< note : cdict will just be referenced, and must outlive compression session */ +ZSTDLIB_API size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs, const ZSTD_CDict* cdict, ZSTD_frameParameters fParams, unsigned long long pledgedSrcSize); /**< same as ZSTD_initCStream_usingCDict(), with control over frame parameters. pledgedSrcSize must be correct. If srcSize is not known at init time, use value ZSTD_CONTENTSIZE_UNKNOWN. */ + +/*! 
ZSTD_resetCStream() : + * start a new compression job, using same parameters from previous job. + * This is typically useful to skip dictionary loading stage, since it will re-use it in-place. + * Note that zcs must be init at least once before using ZSTD_resetCStream(). + * If pledgedSrcSize is not known at reset time, use macro ZSTD_CONTENTSIZE_UNKNOWN. + * If pledgedSrcSize > 0, its value must be correct, as it will be written in header, and controlled at the end. + * For the time being, pledgedSrcSize==0 is interpreted as "srcSize unknown" for compatibility with older programs, + * but it will change to mean "empty" in future version, so use macro ZSTD_CONTENTSIZE_UNKNOWN instead. + * @return : 0, or an error code (which can be tested using ZSTD_isError()) */ +ZSTDLIB_API size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize); + + +typedef struct { + unsigned long long ingested; + unsigned long long consumed; + unsigned long long produced; +} ZSTD_frameProgression; + +/* ZSTD_getFrameProgression(): + * tells how much data has been ingested (read from input) + * consumed (input actually compressed) and produced (output) for current frame. + * Therefore, (ingested - consumed) is amount of input data buffered internally, not yet compressed. + * Can report progression inside worker threads (multi-threading and non-blocking mode). 
+ */ +ZSTD_frameProgression ZSTD_getFrameProgression(const ZSTD_CCtx* cctx); + + + +/*===== Advanced Streaming decompression functions =====*/ +typedef enum { DStream_p_maxWindowSize } ZSTD_DStreamParameter_e; +ZSTDLIB_API size_t ZSTD_setDStreamParameter(ZSTD_DStream* zds, ZSTD_DStreamParameter_e paramType, unsigned paramValue); /* obsolete : this API will be removed in a future version */ +ZSTDLIB_API size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize); /**< note: no dictionary will be used if dict == NULL or dictSize < 8 */ +ZSTDLIB_API size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const ZSTD_DDict* ddict); /**< note : ddict is referenced, it must outlive decompression session */ +ZSTDLIB_API size_t ZSTD_resetDStream(ZSTD_DStream* zds); /**< re-use decompression parameters from previous init; saves dictionary loading */ + + +/********************************************************************* +* Buffer-less and synchronous inner streaming functions +* +* This is an advanced API, giving full control over buffer management, for users which need direct control over memory. +* But it's also a complex one, with several restrictions, documented below. +* Prefer normal streaming API for an easier experience. +********************************************************************* */ + +/** + Buffer-less streaming compression (synchronous mode) + + A ZSTD_CCtx object is required to track streaming operations. + Use ZSTD_createCCtx() / ZSTD_freeCCtx() to manage resource. + ZSTD_CCtx object can be re-used multiple times within successive compression operations. + + Start by initializing a context. + Use ZSTD_compressBegin(), or ZSTD_compressBegin_usingDict() for dictionary compression, + or ZSTD_compressBegin_advanced(), for finer parameter control. + It's also possible to duplicate a reference context which has already been initialized, using ZSTD_copyCCtx() + + Then, consume your input using ZSTD_compressContinue(). 
+ There are some important considerations to keep in mind when using this advanced function : + - ZSTD_compressContinue() has no internal buffer. It uses externally provided buffers only. + - Interface is synchronous : input is consumed entirely and produces 1+ compressed blocks. + - Caller must ensure there is enough space in `dst` to store compressed data under worst case scenario. + Worst case evaluation is provided by ZSTD_compressBound(). + ZSTD_compressContinue() doesn't guarantee recovery after a failed compression. + - ZSTD_compressContinue() presumes prior input ***is still accessible and unmodified*** (up to maximum distance size, see WindowLog). + It remembers all previous contiguous blocks, plus one separated memory segment (which can itself consist of multiple contiguous blocks) + - ZSTD_compressContinue() detects that prior input has been overwritten when `src` buffer overlaps. + In which case, it will "discard" the relevant memory section from its history. + + Finish a frame with ZSTD_compressEnd(), which will write the last block(s) and optional checksum. + It's possible to use srcSize==0, in which case, it will write a final empty block to end the frame. + Without last block mark, frames are considered unfinished (hence corrupted) by compliant decoders. + + `ZSTD_CCtx` object can be re-used (ZSTD_compressBegin()) to compress again. 
+*/ + +/*===== Buffer-less streaming compression functions =====*/ +ZSTDLIB_API size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel); +ZSTDLIB_API size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel); +ZSTDLIB_API size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize); /**< pledgedSrcSize : If srcSize is not known at init time, use ZSTD_CONTENTSIZE_UNKNOWN */ +ZSTDLIB_API size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict); /**< note: fails if cdict==NULL */ +ZSTDLIB_API size_t ZSTD_compressBegin_usingCDict_advanced(ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict, ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize); /* compression parameters are already set within cdict. pledgedSrcSize must be correct. If srcSize is not known, use macro ZSTD_CONTENTSIZE_UNKNOWN */ +ZSTDLIB_API size_t ZSTD_copyCCtx(ZSTD_CCtx* cctx, const ZSTD_CCtx* preparedCCtx, unsigned long long pledgedSrcSize); /**< note: if pledgedSrcSize is not known, use ZSTD_CONTENTSIZE_UNKNOWN */ + +ZSTDLIB_API size_t ZSTD_compressContinue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize); +ZSTDLIB_API size_t ZSTD_compressEnd(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize); + + +/*- + Buffer-less streaming decompression (synchronous mode) + + A ZSTD_DCtx object is required to track streaming operations. + Use ZSTD_createDCtx() / ZSTD_freeDCtx() to manage it. + A ZSTD_DCtx object can be re-used multiple times. + + First typical operation is to retrieve frame parameters, using ZSTD_getFrameHeader(). + Frame header is extracted from the beginning of compressed frame, so providing only the frame's beginning is enough. + Data fragment must be large enough to ensure successful decoding. 
+ `ZSTD_frameHeaderSize_max` bytes is guaranteed to always be large enough. + @result : 0 : successful decoding, the `ZSTD_frameHeader` structure is correctly filled. + >0 : `srcSize` is too small, please provide at least @result bytes on next attempt. + errorCode, which can be tested using ZSTD_isError(). + + It fills a ZSTD_frameHeader structure with important information to correctly decode the frame, + such as the dictionary ID, content size, or maximum back-reference distance (`windowSize`). + Note that these values could be wrong, either because of data corruption, or because a 3rd party deliberately spoofs false information. + As a consequence, check that values remain within valid application range. + For example, do not allocate memory blindly, check that `windowSize` is within expectation. + Each application can set its own limits, depending on local restrictions. + For extended interoperability, it is recommended to support `windowSize` of at least 8 MB. + + ZSTD_decompressContinue() needs previous data blocks during decompression, up to `windowSize` bytes. + ZSTD_decompressContinue() is very sensitive to contiguity, + if 2 blocks don't follow each other, make sure that either the compressor breaks contiguity at the same place, + or that previous contiguous segment is large enough to properly handle maximum back-reference distance. + There are multiple ways to guarantee this condition. + + The most memory efficient way is to use a round buffer of sufficient size. + Sufficient size is determined by invoking ZSTD_decodingBufferSize_min(), + which can @return an error code if required value is too large for current system (in 32-bits mode). + In a round buffer methodology, ZSTD_decompressContinue() decompresses each block next to previous one, + up to the moment there is not enough room left in the buffer to guarantee decoding another full block, + which maximum size is provided in `ZSTD_frameHeader` structure, field `blockSizeMax`. 
+ At which point, decoding can resume from the beginning of the buffer. + Note that already decoded data stored in the buffer should be flushed before being overwritten. + + There are alternatives possible, for example using two or more buffers of size `windowSize` each, though they consume more memory. + + Finally, if you control the compression process, you can also ignore all buffer size rules, + as long as the encoder and decoder progress in "lock-step", + aka use exactly the same buffer sizes, break contiguity at the same place, etc. + + Once buffers are setup, start decompression, with ZSTD_decompressBegin(). + If decompression requires a dictionary, use ZSTD_decompressBegin_usingDict() or ZSTD_decompressBegin_usingDDict(). + + Then use ZSTD_nextSrcSizeToDecompress() and ZSTD_decompressContinue() alternatively. + ZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTD_decompressContinue(). + ZSTD_decompressContinue() requires this _exact_ amount of bytes, or it will fail. + + @result of ZSTD_decompressContinue() is the number of bytes regenerated within 'dst' (necessarily <= dstCapacity). + It can be zero : it just means ZSTD_decompressContinue() has decoded some metadata item. + It can also be an error code, which can be tested with ZSTD_isError(). + + A frame is fully decoded when ZSTD_nextSrcSizeToDecompress() returns zero. + Context can then be reset to start a new decompression. + + Note : it's possible to know if next input to present is a header or a block, using ZSTD_nextInputType(). + This information is not required to properly decode a frame. + + == Special case : skippable frames == + + Skippable frames allow integration of user-defined data into a flow of concatenated frames. + Skippable frames will be ignored (skipped) by decompressor. 
+ The format of skippable frames is as follows : + a) Skippable frame ID - 4 Bytes, Little endian format, any value from 0x184D2A50 to 0x184D2A5F + b) Frame Size - 4 Bytes, Little endian format, unsigned 32-bits + c) Frame Content - any content (User Data) of length equal to Frame Size + For skippable frames ZSTD_getFrameHeader() returns zfhPtr->frameType==ZSTD_skippableFrame. + For skippable frames ZSTD_decompressContinue() always returns 0 : it only skips the content. +*/ + +/*===== Buffer-less streaming decompression functions =====*/ +typedef enum { ZSTD_frame, ZSTD_skippableFrame } ZSTD_frameType_e; +typedef struct { + unsigned long long frameContentSize; /* if == ZSTD_CONTENTSIZE_UNKNOWN, it means this field is not available. 0 means "empty" */ + unsigned long long windowSize; /* can be very large, up to <= frameContentSize */ + unsigned blockSizeMax; + ZSTD_frameType_e frameType; /* if == ZSTD_skippableFrame, frameContentSize is the size of skippable content */ + unsigned headerSize; + unsigned dictID; + unsigned checksumFlag; +} ZSTD_frameHeader; +ZSTDLIB_API size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize); /**< doesn't consume input */ +ZSTDLIB_API size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long long frameContentSize); /**< when frame content size is not known, pass in frameContentSize == ZSTD_CONTENTSIZE_UNKNOWN */ + +ZSTDLIB_API size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx); +ZSTDLIB_API size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx* dctx, const void* dict, size_t dictSize); +ZSTDLIB_API size_t ZSTD_decompressBegin_usingDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict); + +ZSTDLIB_API size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx); +ZSTDLIB_API size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize); + +/* misc */ +ZSTDLIB_API void ZSTD_copyDCtx(ZSTD_DCtx* dctx, const ZSTD_DCtx* preparedDCtx); +typedef enum { 
ZSTDnit_frameHeader, ZSTDnit_blockHeader, ZSTDnit_block, ZSTDnit_lastBlock, ZSTDnit_checksum, ZSTDnit_skippableFrame } ZSTD_nextInputType_e; +ZSTDLIB_API ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx); + + + +/* ============================================ */ +/** New advanced API (experimental) */ +/* ============================================ */ + +/* notes on API design : + * In this proposal, parameters are pushed one by one into an existing context, + * and then applied on all subsequent compression jobs. + * When no parameter is ever provided, CCtx is created with compression level ZSTD_CLEVEL_DEFAULT. + * + * This API is intended to replace all others advanced / experimental API entry points. + * But it stands a reasonable chance to become "stable", after a reasonable testing period. + */ + +/* note on naming convention : + * Initially, the API favored names like ZSTD_setCCtxParameter() . + * In this proposal, convention is changed towards ZSTD_CCtx_setParameter() . + * The main driver is that it identifies more clearly the target object type. + * It feels clearer when considering multiple targets : + * ZSTD_CDict_setParameter() (rather than ZSTD_setCDictParameter()) + * ZSTD_CCtxParams_setParameter() (rather than ZSTD_setCCtxParamsParameter() ) + * etc... + */ + +/* note on enum design : + * All enum will be pinned to explicit values before reaching "stable API" status */ + +typedef enum { + /* Opened question : should we have a format ZSTD_f_auto ? + * Today, it would mean exactly the same as ZSTD_f_zstd1. + * But, in the future, should several formats become supported, + * on the compression side, it would mean "default format". + * On the decompression side, it would mean "automatic format detection", + * so that ZSTD_f_zstd1 would mean "accept *only* zstd frames". + * Since meaning is a little different, another option could be to define different enums for compression and decompression. 
+ * This question could be kept for later, when there are actually multiple formats to support, + * but there is also the question of pinning enum values, and pinning value `0` is especially important */ + ZSTD_f_zstd1 = 0, /* zstd frame format, specified in zstd_compression_format.md (default) */ + ZSTD_f_zstd1_magicless, /* Variant of zstd frame format, without initial 4-bytes magic number. + * Useful to save 4 bytes per generated frame. + * Decoder cannot recognise automatically this format, requiring instructions. */ +} ZSTD_format_e; + +typedef enum { + /* compression format */ + ZSTD_p_format = 10, /* See ZSTD_format_e enum definition. + * Cast selected format as unsigned for ZSTD_CCtx_setParameter() compatibility. */ + + /* compression parameters */ + ZSTD_p_compressionLevel=100, /* Update all compression parameters according to pre-defined cLevel table + * Default level is ZSTD_CLEVEL_DEFAULT==3. + * Special: value 0 means "do not change cLevel". + * Note 1 : it's possible to pass a negative compression level by casting it to unsigned type. + * Note 2 : setting a level sets all default values of other compression parameters. + * Note 3 : setting compressionLevel automatically updates ZSTD_p_compressLiterals. */ + ZSTD_p_windowLog, /* Maximum allowed back-reference distance, expressed as power of 2. + * Must be clamped between ZSTD_WINDOWLOG_MIN and ZSTD_WINDOWLOG_MAX. + * Special: value 0 means "use default windowLog". + * Note: Using a window size greater than ZSTD_MAXWINDOWSIZE_DEFAULT (default: 2^27) + * requires explicitly allowing such window size during decompression stage. */ + ZSTD_p_hashLog, /* Size of the probe table, as a power of 2. + * Resulting table size is (1 << (hashLog+2)). + * Must be clamped between ZSTD_HASHLOG_MIN and ZSTD_HASHLOG_MAX. + * Larger tables improve compression ratio of strategies <= dFast, + * and improve speed of strategies > dFast. + * Special: value 0 means "use default hashLog". 
*/ + ZSTD_p_chainLog, /* Size of the full-search table, as a power of 2. + * Resulting table size is (1 << (chainLog+2)). + * Larger tables result in better and slower compression. + * This parameter is useless when using "fast" strategy. + * Special: value 0 means "use default chainLog". */ + ZSTD_p_searchLog, /* Number of search attempts, as a power of 2. + * More attempts result in better and slower compression. + * This parameter is useless when using "fast" and "dFast" strategies. + * Special: value 0 means "use default searchLog". */ + ZSTD_p_minMatch, /* Minimum size of searched matches (note : repCode matches can be smaller). + * Larger values make faster compression and decompression, but decrease ratio. + * Must be clamped between ZSTD_SEARCHLENGTH_MIN and ZSTD_SEARCHLENGTH_MAX. + * Note that currently, for all strategies < btopt, effective minimum is 4. + * , for all strategies > fast, effective maximum is 6. + * Special: value 0 means "use default minMatchLength". */ + ZSTD_p_targetLength, /* Impact of this field depends on strategy. + * For strategies btopt & btultra: + * Length of Match considered "good enough" to stop search. + * Larger values make compression stronger, and slower. + * For strategy fast: + * Distance between match sampling. + * Larger values make compression faster, and weaker. + * Special: value 0 means "use default targetLength". */ + ZSTD_p_compressionStrategy, /* See ZSTD_strategy enum definition. + * Cast selected strategy as unsigned for ZSTD_CCtx_setParameter() compatibility. + * The higher the value of selected strategy, the more complex it is, + * resulting in stronger and slower compression. + * Special: value 0 means "use default strategy". */ + + ZSTD_p_enableLongDistanceMatching=160, /* Enable long distance matching. + * This parameter is designed to improve compression ratio + * for large inputs, by finding large matches at long distance. + * It increases memory usage and window size. 
+ * Note: enabling this parameter increases ZSTD_p_windowLog to 128 MB + * except when expressly set to a different value. */ + ZSTD_p_ldmHashLog, /* Size of the table for long distance matching, as a power of 2. + * Larger values increase memory usage and compression ratio, + * but decrease compression speed. + * Must be clamped between ZSTD_HASHLOG_MIN and ZSTD_HASHLOG_MAX + * default: windowlog - 7. + * Special: value 0 means "automatically determine hashlog". */ + ZSTD_p_ldmMinMatch, /* Minimum match size for long distance matcher. + * Larger/too small values usually decrease compression ratio. + * Must be clamped between ZSTD_LDM_MINMATCH_MIN and ZSTD_LDM_MINMATCH_MAX. + * Special: value 0 means "use default value" (default: 64). */ + ZSTD_p_ldmBucketSizeLog, /* Log size of each bucket in the LDM hash table for collision resolution. + * Larger values improve collision resolution but decrease compression speed. + * The maximum value is ZSTD_LDM_BUCKETSIZELOG_MAX . + * Special: value 0 means "use default value" (default: 3). */ + ZSTD_p_ldmHashEveryLog, /* Frequency of inserting/looking up entries in the LDM hash table. + * Must be clamped between 0 and (ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN). + * Default is MAX(0, (windowLog - ldmHashLog)), optimizing hash table usage. + * Larger values improve compression speed. + * Deviating far from default value will likely result in a compression ratio decrease. + * Special: value 0 means "automatically determine hashEveryLog". 
*/ + + /* frame parameters */ + ZSTD_p_contentSizeFlag=200, /* Content size will be written into frame header _whenever known_ (default:1) + * Content size must be known at the beginning of compression, + * it is provided using ZSTD_CCtx_setPledgedSrcSize() */ + ZSTD_p_checksumFlag, /* A 32-bits checksum of content is written at end of frame (default:0) */ + ZSTD_p_dictIDFlag, /* When applicable, dictionary's ID is written into frame header (default:1) */ + + /* multi-threading parameters */ + /* These parameters are only useful if multi-threading is enabled (ZSTD_MULTITHREAD). + * They return an error otherwise. */ + ZSTD_p_nbWorkers=400, /* Select how many threads will be spawned to compress in parallel. + * When nbWorkers >= 1, triggers asynchronous mode : + * ZSTD_compress_generic() consumes some input, flush some output if possible, and immediately gives back control to caller, + * while compression work is performed in parallel, within worker threads. + * (note : a strong exception to this rule is when first invocation sets ZSTD_e_end : it becomes a blocking call). + * More workers improve speed, but also increase memory usage. + * Default value is `0`, aka "single-threaded mode" : no worker is spawned, compression is performed inside Caller's thread, all invocations are blocking */ + ZSTD_p_jobSize, /* Size of a compression job. This value is enforced only in non-blocking mode. + * Each compression job is completed in parallel, so this value indirectly controls the nb of active threads. + * 0 means default, which is dynamically determined based on compression parameters. + * Job size must be a minimum of overlapSize, or 1 MB, whichever is largest. + * The minimum size is automatically and transparently enforced */ + ZSTD_p_overlapSizeLog, /* Size of previous input reloaded at the beginning of each job. 
+ * 0 => no overlap, 6(default) => use 1/8th of windowSize, >=9 => use full windowSize */ + + /* =================================================================== */ + /* experimental parameters - no stability guaranteed */ + /* =================================================================== */ + + ZSTD_p_compressLiterals=1000, /* control huffman compression of literals (enabled) by default. + * disabling it improves speed and decreases compression ratio by a large amount. + * note : this setting is automatically updated when changing compression level. + * positive compression levels set ZSTD_p_compressLiterals to 1. + * negative compression levels set ZSTD_p_compressLiterals to 0. */ + + ZSTD_p_forceMaxWindow=1100, /* Force back-reference distances to remain < windowSize, + * even when referencing into Dictionary content (default:0) */ + +} ZSTD_cParameter; + + +/*! ZSTD_CCtx_setParameter() : + * Set one compression parameter, selected by enum ZSTD_cParameter. + * Setting a parameter is generally only possible during frame initialization (before starting compression), + * except for a few exceptions which can be updated during compression: compressionLevel, hashLog, chainLog, searchLog, minMatch, targetLength and strategy. + * Note : when `value` is an enum, cast it to unsigned for proper type checking. + * @result : informational value (typically, value being set clamped correctly), + * or an error code (which can be tested with ZSTD_isError()). */ +ZSTDLIB_API size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, unsigned value); + +/*! ZSTD_CCtx_setPledgedSrcSize() : + * Total input data size to be compressed as a single frame. + * This value will be controlled at the end, and result in error if not respected. + * @result : 0, or an error code (which can be tested with ZSTD_isError()). + * Note 1 : 0 means zero, empty. + * In order to mean "unknown content size", pass constant ZSTD_CONTENTSIZE_UNKNOWN. 
+ * ZSTD_CONTENTSIZE_UNKNOWN is default value for any new compression job. + * Note 2 : If all data is provided and consumed in a single round, + * this value is overridden by srcSize instead. */ +ZSTDLIB_API size_t ZSTD_CCtx_setPledgedSrcSize(ZSTD_CCtx* cctx, unsigned long long pledgedSrcSize); + +/*! ZSTD_CCtx_loadDictionary() : + * Create an internal CDict from `dict` buffer. + * Decompression will have to use same dictionary. + * @result : 0, or an error code (which can be tested with ZSTD_isError()). + * Special: Adding a NULL (or 0-size) dictionary invalidates previous dictionary, + * meaning "return to no-dictionary mode". + * Note 1 : Dictionary will be used for all future compression jobs. + * To return to "no-dictionary" situation, load a NULL dictionary + * Note 2 : Loading a dictionary involves building tables, which are dependent on compression parameters. + * For this reason, compression parameters cannot be changed anymore after loading a dictionary. + * It's also a CPU consuming operation, with non-negligible impact on latency. + * Note 3 :`dict` content will be copied internally. + * Use ZSTD_CCtx_loadDictionary_byReference() to reference dictionary content instead. + * In such a case, dictionary buffer must outlive its users. + * Note 4 : Use ZSTD_CCtx_loadDictionary_advanced() + * to precisely select how dictionary content must be interpreted. */ +ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize); +ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary_byReference(ZSTD_CCtx* cctx, const void* dict, size_t dictSize); +ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType); + + +/*! ZSTD_CCtx_refCDict() : + * Reference a prepared dictionary, to be used for all next compression jobs. 
+ * Note that compression parameters are enforced from within CDict, + * and supersede any compression parameter previously set within CCtx. + * The dictionary will remain valid for future compression jobs using same CCtx. + * @result : 0, or an error code (which can be tested with ZSTD_isError()). + * Special : adding a NULL CDict means "return to no-dictionary mode". + * Note 1 : Currently, only one dictionary can be managed. + * Adding a new dictionary effectively "discards" any previous one. + * Note 2 : CDict is just referenced, its lifetime must outlive CCtx. */ +ZSTDLIB_API size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict); + +/*! ZSTD_CCtx_refPrefix() : + * Reference a prefix (single-usage dictionary) for next compression job. + * Decompression needs same prefix to properly regenerate data. + * Prefix is **only used once**. Tables are discarded at end of compression job. + * Subsequent compression jobs will be done without prefix (if none is explicitly referenced). + * If there is a need to use same prefix multiple times, consider embedding it into a ZSTD_CDict instead. + * @result : 0, or an error code (which can be tested with ZSTD_isError()). + * Special: Adding any prefix (including NULL) invalidates any previous prefix or dictionary + * Note 1 : Prefix buffer is referenced. It must outlive compression job. + * Note 2 : Referencing a prefix involves building tables, which are dependent on compression parameters. + * It's a CPU consuming operation, with non-negligible impact on latency. + * Note 3 : By default, the prefix is treated as raw content (ZSTD_dm_rawContent). + * Use ZSTD_CCtx_refPrefix_advanced() to alter dictMode. */ +ZSTDLIB_API size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize); +ZSTDLIB_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType); + +/*! ZSTD_CCtx_reset() : + * Return a CCtx to clean state. 
+ * Useful after an error, or to interrupt an ongoing compression job and start a new one. + * Any internal data not yet flushed is cancelled. + * Dictionary (if any) is dropped. + * All parameters are back to default values. + * It's possible to modify compression parameters after a reset. + */ +ZSTDLIB_API void ZSTD_CCtx_reset(ZSTD_CCtx* cctx); + + + +typedef enum { + ZSTD_e_continue=0, /* collect more data, encoder decides when to output compressed result, for optimal conditions */ + ZSTD_e_flush, /* flush any data provided so far - frame will continue, future data can still reference previous data for better compression */ + ZSTD_e_end /* flush any remaining data and close current frame. Any additional data starts a new frame. */ +} ZSTD_EndDirective; + +/*! ZSTD_compress_generic() : + * Behave about the same as ZSTD_compressStream. To note : + * - Compression parameters are pushed into CCtx before starting compression, using ZSTD_CCtx_setParameter() + * - Compression parameters cannot be changed once compression is started. + * - output->pos must be <= dstCapacity, input->pos must be <= srcSize + * - output->pos and input->pos will be updated. They are guaranteed to remain below their respective limit. + * - In single-thread mode (default), function is blocking : it completes its job before returning to caller. + * - In multi-thread mode, function is non-blocking : it just acquires a copy of input, and distributes the job to internal worker threads, + * and then immediately returns, just indicating that there is some data remaining to be flushed. + * The function nonetheless guarantees forward progress : it will return only after it reads or writes at least 1+ byte. + * - Exception : in multi-threading mode, if the first call requests a ZSTD_e_end directive, it is blocking : it will complete compression before giving back control to caller. 
+ * - @return provides a minimum amount of data remaining to be flushed from internal buffers + * or an error code, which can be tested using ZSTD_isError(). + * if @return != 0, flush is not fully completed, there is still some data left within internal buffers. + * This is useful for ZSTD_e_flush, since in this case more flushes are necessary to empty all buffers. + * For ZSTD_e_end, @return == 0 when internal buffers are fully flushed and frame is completed. + * - after a ZSTD_e_end directive, if internal buffer is not fully flushed (@return != 0), + * only ZSTD_e_end or ZSTD_e_flush operations are allowed. + * Before starting a new compression job, or changing compression parameters, + * it is required to fully flush internal buffers. + */ +ZSTDLIB_API size_t ZSTD_compress_generic (ZSTD_CCtx* cctx, + ZSTD_outBuffer* output, + ZSTD_inBuffer* input, + ZSTD_EndDirective endOp); + + +/*! ZSTD_compress_generic_simpleArgs() : + * Same as ZSTD_compress_generic(), + * but using only integral types as arguments. + * Argument list is larger than ZSTD_{in,out}Buffer, + * but can be helpful for binders from dynamic languages + * which have troubles handling structures containing memory pointers. + */ +ZSTDLIB_API size_t ZSTD_compress_generic_simpleArgs ( + ZSTD_CCtx* cctx, + void* dst, size_t dstCapacity, size_t* dstPos, + const void* src, size_t srcSize, size_t* srcPos, + ZSTD_EndDirective endOp); + + +/*! ZSTD_CCtx_params : + * Quick howto : + * - ZSTD_createCCtxParams() : Create a ZSTD_CCtx_params structure + * - ZSTD_CCtxParam_setParameter() : Push parameters one by one into + * an existing ZSTD_CCtx_params structure. + * This is similar to + * ZSTD_CCtx_setParameter(). + * - ZSTD_CCtx_setParametersUsingCCtxParams() : Apply parameters to + * an existing CCtx. + * These parameters will be applied to + * all subsequent compression jobs. + * - ZSTD_compress_generic() : Do compression using the CCtx. + * - ZSTD_freeCCtxParams() : Free the memory. 
+ * + * This can be used with ZSTD_estimateCCtxSize_advanced_usingCCtxParams() + * for static allocation for single-threaded compression. + */ +ZSTDLIB_API ZSTD_CCtx_params* ZSTD_createCCtxParams(void); +ZSTDLIB_API size_t ZSTD_freeCCtxParams(ZSTD_CCtx_params* params); + + +/*! ZSTD_CCtxParams_reset() : + * Reset params to default values. + */ +ZSTDLIB_API size_t ZSTD_CCtxParams_reset(ZSTD_CCtx_params* params); + +/*! ZSTD_CCtxParams_init() : + * Initializes the compression parameters of cctxParams according to + * compression level. All other parameters are reset to their default values. + */ +ZSTDLIB_API size_t ZSTD_CCtxParams_init(ZSTD_CCtx_params* cctxParams, int compressionLevel); + +/*! ZSTD_CCtxParams_init_advanced() : + * Initializes the compression and frame parameters of cctxParams according to + * params. All other parameters are reset to their default values. + */ +ZSTDLIB_API size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params); + + +/*! ZSTD_CCtxParam_setParameter() : + * Similar to ZSTD_CCtx_setParameter. + * Set one compression parameter, selected by enum ZSTD_cParameter. + * Parameters must be applied to a ZSTD_CCtx using ZSTD_CCtx_setParametersUsingCCtxParams(). + * Note : when `value` is an enum, cast it to unsigned for proper type checking. + * @result : 0, or an error code (which can be tested with ZSTD_isError()). + */ +ZSTDLIB_API size_t ZSTD_CCtxParam_setParameter(ZSTD_CCtx_params* params, ZSTD_cParameter param, unsigned value); + +/*! ZSTD_CCtx_setParametersUsingCCtxParams() : + * Apply a set of ZSTD_CCtx_params to the compression context. + * This can be done even after compression is started, + * if nbWorkers==0, this will have no impact until a new compression is started. + * if nbWorkers>=1, new parameters will be picked up at next job, + * with a few restrictions (windowLog, pledgedSrcSize, nbWorkers, jobSize, and overlapLog are not updated). 
+ */ +ZSTDLIB_API size_t ZSTD_CCtx_setParametersUsingCCtxParams( + ZSTD_CCtx* cctx, const ZSTD_CCtx_params* params); + + +/*=== Advanced parameters for decompression API ===*/ + +/* The following parameters must be set after creating a ZSTD_DCtx* (or ZSTD_DStream*) object, + * but before starting decompression of a frame. + */ + +/*! ZSTD_DCtx_loadDictionary() : + * Create an internal DDict from dict buffer, + * to be used to decompress next frames. + * @result : 0, or an error code (which can be tested with ZSTD_isError()). + * Special : Adding a NULL (or 0-size) dictionary invalidates any previous dictionary, + * meaning "return to no-dictionary mode". + * Note 1 : `dict` content will be copied internally. + * Use ZSTD_DCtx_loadDictionary_byReference() + * to reference dictionary content instead. + * In which case, the dictionary buffer must outlive its users. + * Note 2 : Loading a dictionary involves building tables, + * which has a non-negligible impact on CPU usage and latency. + * Note 3 : Use ZSTD_DCtx_loadDictionary_advanced() to select + * how dictionary content will be interpreted and loaded. + */ +ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSize); +ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary_byReference(ZSTD_DCtx* dctx, const void* dict, size_t dictSize); +ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary_advanced(ZSTD_DCtx* dctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType); + + +/*! ZSTD_DCtx_refDDict() : + * Reference a prepared dictionary, to be used to decompress next frames. + * The dictionary remains active for decompression of future frames using same DCtx. + * @result : 0, or an error code (which can be tested with ZSTD_isError()). + * Note 1 : Currently, only one dictionary can be managed. + * Referencing a new dictionary effectively "discards" any previous one. + * Special : adding a NULL DDict means "return to no-dictionary mode". 
+ * Note 2 : DDict is just referenced, its lifetime must outlive its usage from DCtx. + */ +ZSTDLIB_API size_t ZSTD_DCtx_refDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict); + + +/*! ZSTD_DCtx_refPrefix() : + * Reference a prefix (single-usage dictionary) for next compression job. + * Prefix is **only used once**. It must be explicitly referenced before each frame. + * If there is a need to use same prefix multiple times, consider embedding it into a ZSTD_DDict instead. + * @result : 0, or an error code (which can be tested with ZSTD_isError()). + * Note 1 : Adding any prefix (including NULL) invalidates any previously set prefix or dictionary + * Note 2 : Prefix buffer is referenced. It must outlive compression job. + * Note 3 : By default, the prefix is treated as raw content (ZSTD_dm_rawContent). + * Use ZSTD_CCtx_refPrefix_advanced() to alter dictMode. + * Note 4 : Referencing a raw content prefix has almost no cpu nor memory cost. + */ +ZSTDLIB_API size_t ZSTD_DCtx_refPrefix(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize); +ZSTDLIB_API size_t ZSTD_DCtx_refPrefix_advanced(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType); + + +/*! ZSTD_DCtx_setMaxWindowSize() : + * Refuses allocating internal buffers for frames requiring a window size larger than provided limit. + * This is useful to prevent a decoder context from reserving too much memory for itself (potential attack scenario). + * This parameter is only useful in streaming mode, since no internal buffer is allocated in direct mode. + * By default, a decompression context accepts all window sizes <= (1 << ZSTD_WINDOWLOG_MAX) + * @return : 0, or an error code (which can be tested using ZSTD_isError()). + */ +ZSTDLIB_API size_t ZSTD_DCtx_setMaxWindowSize(ZSTD_DCtx* dctx, size_t maxWindowSize); + + +/*! ZSTD_DCtx_setFormat() : + * Instruct the decoder context about what kind of data to decode next. 
+ * This instruction is mandatory to decode data without a fully-formed header, + * such ZSTD_f_zstd1_magicless for example. + * @return : 0, or an error code (which can be tested using ZSTD_isError()). + */ +ZSTDLIB_API size_t ZSTD_DCtx_setFormat(ZSTD_DCtx* dctx, ZSTD_format_e format); + + +/*! ZSTD_decompress_generic() : + * Behave the same as ZSTD_decompressStream. + * Decompression parameters cannot be changed once decompression is started. + * @return : an error code, which can be tested using ZSTD_isError() + * if >0, a hint, nb of expected input bytes for next invocation. + * `0` means : a frame has just been fully decoded and flushed. + */ +ZSTDLIB_API size_t ZSTD_decompress_generic(ZSTD_DCtx* dctx, + ZSTD_outBuffer* output, + ZSTD_inBuffer* input); + + +/*! ZSTD_decompress_generic_simpleArgs() : + * Same as ZSTD_decompress_generic(), + * but using only integral types as arguments. + * Argument list is larger than ZSTD_{in,out}Buffer, + * but can be helpful for binders from dynamic languages + * which have troubles handling structures containing memory pointers. + */ +ZSTDLIB_API size_t ZSTD_decompress_generic_simpleArgs ( + ZSTD_DCtx* dctx, + void* dst, size_t dstCapacity, size_t* dstPos, + const void* src, size_t srcSize, size_t* srcPos); + + +/*! ZSTD_DCtx_reset() : + * Return a DCtx to clean state. + * If a decompression was ongoing, any internal data not yet flushed is cancelled. + * All parameters are back to default values, including sticky ones. + * Dictionary (if any) is dropped. + * Parameters can be modified again after a reset. + */ +ZSTDLIB_API void ZSTD_DCtx_reset(ZSTD_DCtx* dctx); + + + +/* ============================ */ +/** Block level API */ +/* ============================ */ + +/*! + Block functions produce and decode raw zstd blocks, without frame metadata. + Frame metadata cost is typically ~18 bytes, which can be non-negligible for very small blocks (< 100 bytes). 
+ User will have to take in charge required information to regenerate data, such as compressed and content sizes. + + A few rules to respect : + - Compressing and decompressing require a context structure + + Use ZSTD_createCCtx() and ZSTD_createDCtx() + - It is necessary to init context before starting + + compression : any ZSTD_compressBegin*() variant, including with dictionary + + decompression : any ZSTD_decompressBegin*() variant, including with dictionary + + copyCCtx() and copyDCtx() can be used too + - Block size is limited, it must be <= ZSTD_getBlockSize() <= ZSTD_BLOCKSIZE_MAX == 128 KB + + If input is larger than a block size, it's necessary to split input data into multiple blocks + + For inputs larger than a single block size, consider using the regular ZSTD_compress() instead. + Frame metadata is not that costly, and quickly becomes negligible as source size grows larger. + - When a block is considered not compressible enough, ZSTD_compressBlock() result will be zero. + In which case, nothing is produced into `dst`. + + User must test for such outcome and deal directly with uncompressed data + + ZSTD_decompressBlock() doesn't accept uncompressed data as input !!! + + In case of multiple successive blocks, should some of them be uncompressed, + decoder must be informed of their existence in order to follow proper history. + Use ZSTD_insertBlock() for such a case. +*/ + +#define ZSTD_BLOCKSIZELOG_MAX 17 +#define ZSTD_BLOCKSIZE_MAX (1< /* malloc, calloc, free */ +#include /* memset */ +#include "error_private.h" +#include "zstd_internal.h" + + +/*-**************************************** +* Version +******************************************/ +unsigned ZSTD_versionNumber(void) { return ZSTD_VERSION_NUMBER; } + +const char* ZSTD_versionString(void) { return ZSTD_VERSION_STRING; } + + +/*-**************************************** +* ZSTD Error Management +******************************************/ +/*! 
ZSTD_isError() : + * tells if a return value is an error code */ +unsigned ZSTD_isError(size_t code) { return ERR_isError(code); } + +/*! ZSTD_getErrorName() : + * provides error code string from function result (useful for debugging) */ +const char* ZSTD_getErrorName(size_t code) { return ERR_getErrorName(code); } + +/*! ZSTD_getError() : + * convert a `size_t` function result into a proper ZSTD_errorCode enum */ +ZSTD_ErrorCode ZSTD_getErrorCode(size_t code) { return ERR_getErrorCode(code); } + +/*! ZSTD_getErrorString() : + * provides error code string from enum */ +const char* ZSTD_getErrorString(ZSTD_ErrorCode code) { return ERR_getErrorString(code); } + +/*! g_debuglog_enable : + * turn on/off debug traces (global switch) */ +#if defined(ZSTD_DEBUG) && (ZSTD_DEBUG >= 2) +int g_debuglog_enable = 1; +#endif + + +/*=************************************************************** +* Custom allocator +****************************************************************/ +void* ZSTD_malloc(size_t size, ZSTD_customMem customMem) +{ + if (customMem.customAlloc) + return customMem.customAlloc(customMem.opaque, size); + return malloc(size); +} + +void* ZSTD_calloc(size_t size, ZSTD_customMem customMem) +{ + if (customMem.customAlloc) { + /* calloc implemented as malloc+memset; + * not as efficient as calloc, but next best guess for custom malloc */ + void* const ptr = customMem.customAlloc(customMem.opaque, size); + memset(ptr, 0, size); + return ptr; + } + return calloc(1, size); +} + +void ZSTD_free(void* ptr, ZSTD_customMem customMem) +{ + if (ptr!=NULL) { + if (customMem.customFree) + customMem.customFree(customMem.opaque, ptr); + else + free(ptr); + } +} diff --git a/vendor/github.com/DataDog/zstd/zstd_compress.c b/vendor/github.com/DataDog/zstd/zstd_compress.c new file mode 100644 index 00000000000..2aa26da4cd4 --- /dev/null +++ b/vendor/github.com/DataDog/zstd/zstd_compress.c @@ -0,0 +1,3449 @@ +/* + * Copyright (c) 2016-present, Yann Collet, 
Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + + +/*-************************************* +* Tuning parameters +***************************************/ +#ifndef ZSTD_CLEVEL_DEFAULT +# define ZSTD_CLEVEL_DEFAULT 3 +#endif + + +/*-************************************* +* Dependencies +***************************************/ +#include /* memset */ +#include "cpu.h" +#include "mem.h" +#define FSE_STATIC_LINKING_ONLY /* FSE_encodeSymbol */ +#include "fse.h" +#define HUF_STATIC_LINKING_ONLY +#include "huf.h" +#include "zstd_compress_internal.h" +#include "zstd_fast.h" +#include "zstd_double_fast.h" +#include "zstd_lazy.h" +#include "zstd_opt.h" +#include "zstd_ldm.h" + + +/*-************************************* +* Helper functions +***************************************/ +size_t ZSTD_compressBound(size_t srcSize) { + return ZSTD_COMPRESSBOUND(srcSize); +} + + +/*-************************************* +* Context memory management +***************************************/ +struct ZSTD_CDict_s { + void* dictBuffer; + const void* dictContent; + size_t dictContentSize; + void* workspace; + size_t workspaceSize; + ZSTD_matchState_t matchState; + ZSTD_compressedBlockState_t cBlockState; + ZSTD_compressionParameters cParams; + ZSTD_customMem customMem; + U32 dictID; +}; /* typedef'd to ZSTD_CDict within "zstd.h" */ + +ZSTD_CCtx* ZSTD_createCCtx(void) +{ + return ZSTD_createCCtx_advanced(ZSTD_defaultCMem); +} + +ZSTD_CCtx* ZSTD_createCCtx_advanced(ZSTD_customMem customMem) +{ + ZSTD_STATIC_ASSERT(zcss_init==0); + ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN==(0ULL - 1)); + if (!customMem.customAlloc ^ !customMem.customFree) return NULL; + { ZSTD_CCtx* const cctx = 
(ZSTD_CCtx*)ZSTD_calloc(sizeof(ZSTD_CCtx), customMem); + if (!cctx) return NULL; + cctx->customMem = customMem; + cctx->requestedParams.compressionLevel = ZSTD_CLEVEL_DEFAULT; + cctx->requestedParams.fParams.contentSizeFlag = 1; + cctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid()); + return cctx; + } +} + +ZSTD_CCtx* ZSTD_initStaticCCtx(void *workspace, size_t workspaceSize) +{ + ZSTD_CCtx* const cctx = (ZSTD_CCtx*) workspace; + if (workspaceSize <= sizeof(ZSTD_CCtx)) return NULL; /* minimum size */ + if ((size_t)workspace & 7) return NULL; /* must be 8-aligned */ + memset(workspace, 0, workspaceSize); /* may be a bit generous, could memset be smaller ? */ + cctx->staticSize = workspaceSize; + cctx->workSpace = (void*)(cctx+1); + cctx->workSpaceSize = workspaceSize - sizeof(ZSTD_CCtx); + + /* statically sized space. entropyWorkspace never moves (but prev/next block swap places) */ + if (cctx->workSpaceSize < HUF_WORKSPACE_SIZE + 2 * sizeof(ZSTD_compressedBlockState_t)) return NULL; + assert(((size_t)cctx->workSpace & (sizeof(void*)-1)) == 0); /* ensure correct alignment */ + cctx->blockState.prevCBlock = (ZSTD_compressedBlockState_t*)cctx->workSpace; + cctx->blockState.nextCBlock = cctx->blockState.prevCBlock + 1; + { + void* const ptr = cctx->blockState.nextCBlock + 1; + cctx->entropyWorkspace = (U32*)ptr; + } + cctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid()); + return cctx; +} + +size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx) +{ + if (cctx==NULL) return 0; /* support free on NULL */ + if (cctx->staticSize) return ERROR(memory_allocation); /* not compatible with static CCtx */ + ZSTD_free(cctx->workSpace, cctx->customMem); cctx->workSpace = NULL; + ZSTD_freeCDict(cctx->cdictLocal); cctx->cdictLocal = NULL; +#ifdef ZSTD_MULTITHREAD + ZSTDMT_freeCCtx(cctx->mtctx); cctx->mtctx = NULL; +#endif + ZSTD_free(cctx, cctx->customMem); + return 0; /* reserved as a potential error code in the future */ +} + + +static size_t ZSTD_sizeof_mtctx(const ZSTD_CCtx* cctx) +{ +#ifdef ZSTD_MULTITHREAD + 
return ZSTDMT_sizeof_CCtx(cctx->mtctx); +#else + (void) cctx; + return 0; +#endif +} + + +size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx) +{ + if (cctx==NULL) return 0; /* support sizeof on NULL */ + return sizeof(*cctx) + cctx->workSpaceSize + + ZSTD_sizeof_CDict(cctx->cdictLocal) + + ZSTD_sizeof_mtctx(cctx); +} + +size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs) +{ + return ZSTD_sizeof_CCtx(zcs); /* same object */ +} + +/* private API call, for dictBuilder only */ +const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx) { return &(ctx->seqStore); } + +ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams( + const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize) +{ + ZSTD_compressionParameters cParams = ZSTD_getCParams(CCtxParams->compressionLevel, srcSizeHint, dictSize); + if (CCtxParams->ldmParams.enableLdm) cParams.windowLog = ZSTD_LDM_DEFAULT_WINDOW_LOG; + if (CCtxParams->cParams.windowLog) cParams.windowLog = CCtxParams->cParams.windowLog; + if (CCtxParams->cParams.hashLog) cParams.hashLog = CCtxParams->cParams.hashLog; + if (CCtxParams->cParams.chainLog) cParams.chainLog = CCtxParams->cParams.chainLog; + if (CCtxParams->cParams.searchLog) cParams.searchLog = CCtxParams->cParams.searchLog; + if (CCtxParams->cParams.searchLength) cParams.searchLength = CCtxParams->cParams.searchLength; + if (CCtxParams->cParams.targetLength) cParams.targetLength = CCtxParams->cParams.targetLength; + if (CCtxParams->cParams.strategy) cParams.strategy = CCtxParams->cParams.strategy; + return cParams; +} + +static ZSTD_CCtx_params ZSTD_makeCCtxParamsFromCParams( + ZSTD_compressionParameters cParams) +{ + ZSTD_CCtx_params cctxParams; + memset(&cctxParams, 0, sizeof(cctxParams)); + cctxParams.cParams = cParams; + cctxParams.compressionLevel = ZSTD_CLEVEL_DEFAULT; /* should not matter, as all cParams are presumed properly defined */ + assert(!ZSTD_checkCParams(cParams)); + cctxParams.fParams.contentSizeFlag = 1; + return cctxParams; +} + +static 
ZSTD_CCtx_params* ZSTD_createCCtxParams_advanced( + ZSTD_customMem customMem) +{ + ZSTD_CCtx_params* params; + if (!customMem.customAlloc ^ !customMem.customFree) return NULL; + params = (ZSTD_CCtx_params*)ZSTD_calloc( + sizeof(ZSTD_CCtx_params), customMem); + if (!params) { return NULL; } + params->customMem = customMem; + params->compressionLevel = ZSTD_CLEVEL_DEFAULT; + params->fParams.contentSizeFlag = 1; + return params; +} + +ZSTD_CCtx_params* ZSTD_createCCtxParams(void) +{ + return ZSTD_createCCtxParams_advanced(ZSTD_defaultCMem); +} + +size_t ZSTD_freeCCtxParams(ZSTD_CCtx_params* params) +{ + if (params == NULL) { return 0; } + ZSTD_free(params, params->customMem); + return 0; +} + +size_t ZSTD_CCtxParams_reset(ZSTD_CCtx_params* params) +{ + return ZSTD_CCtxParams_init(params, ZSTD_CLEVEL_DEFAULT); +} + +size_t ZSTD_CCtxParams_init(ZSTD_CCtx_params* cctxParams, int compressionLevel) { + if (!cctxParams) { return ERROR(GENERIC); } + memset(cctxParams, 0, sizeof(*cctxParams)); + cctxParams->compressionLevel = compressionLevel; + cctxParams->fParams.contentSizeFlag = 1; + return 0; +} + +size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params) +{ + if (!cctxParams) { return ERROR(GENERIC); } + CHECK_F( ZSTD_checkCParams(params.cParams) ); + memset(cctxParams, 0, sizeof(*cctxParams)); + cctxParams->cParams = params.cParams; + cctxParams->fParams = params.fParams; + cctxParams->compressionLevel = ZSTD_CLEVEL_DEFAULT; /* should not matter, as all cParams are presumed properly defined */ + assert(!ZSTD_checkCParams(params.cParams)); + return 0; +} + +/* ZSTD_assignParamsToCCtxParams() : + * params is presumed valid at this stage */ +static ZSTD_CCtx_params ZSTD_assignParamsToCCtxParams( + ZSTD_CCtx_params cctxParams, ZSTD_parameters params) +{ + ZSTD_CCtx_params ret = cctxParams; + ret.cParams = params.cParams; + ret.fParams = params.fParams; + ret.compressionLevel = ZSTD_CLEVEL_DEFAULT; /* should not matter, as all cParams are 
presumed properly defined */ + assert(!ZSTD_checkCParams(params.cParams)); + return ret; +} + +#define CLAMPCHECK(val,min,max) { \ + if (((val)<(min)) | ((val)>(max))) { \ + return ERROR(parameter_outOfBound); \ +} } + + +static int ZSTD_isUpdateAuthorized(ZSTD_cParameter param) +{ + switch(param) + { + case ZSTD_p_compressionLevel: + case ZSTD_p_hashLog: + case ZSTD_p_chainLog: + case ZSTD_p_searchLog: + case ZSTD_p_minMatch: + case ZSTD_p_targetLength: + case ZSTD_p_compressionStrategy: + case ZSTD_p_compressLiterals: + return 1; + + case ZSTD_p_format: + case ZSTD_p_windowLog: + case ZSTD_p_contentSizeFlag: + case ZSTD_p_checksumFlag: + case ZSTD_p_dictIDFlag: + case ZSTD_p_forceMaxWindow : + case ZSTD_p_nbWorkers: + case ZSTD_p_jobSize: + case ZSTD_p_overlapSizeLog: + case ZSTD_p_enableLongDistanceMatching: + case ZSTD_p_ldmHashLog: + case ZSTD_p_ldmMinMatch: + case ZSTD_p_ldmBucketSizeLog: + case ZSTD_p_ldmHashEveryLog: + default: + return 0; + } +} + +size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, unsigned value) +{ + DEBUGLOG(4, "ZSTD_CCtx_setParameter (%u, %u)", (U32)param, value); + if (cctx->streamStage != zcss_init) { + if (ZSTD_isUpdateAuthorized(param)) { + cctx->cParamsChanged = 1; + } else { + return ERROR(stage_wrong); + } } + + switch(param) + { + case ZSTD_p_format : + return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value); + + case ZSTD_p_compressionLevel: + if (cctx->cdict) return ERROR(stage_wrong); + return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value); + + case ZSTD_p_windowLog: + case ZSTD_p_hashLog: + case ZSTD_p_chainLog: + case ZSTD_p_searchLog: + case ZSTD_p_minMatch: + case ZSTD_p_targetLength: + case ZSTD_p_compressionStrategy: + if (cctx->cdict) return ERROR(stage_wrong); + return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value); + + case ZSTD_p_compressLiterals: + case ZSTD_p_contentSizeFlag: + case ZSTD_p_checksumFlag: + case ZSTD_p_dictIDFlag: + return 
ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value); + + case ZSTD_p_forceMaxWindow : /* Force back-references to remain < windowSize, + * even when referencing into Dictionary content. + * default : 0 when using a CDict, 1 when using a Prefix */ + return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value); + + case ZSTD_p_nbWorkers: + if ((value>0) && cctx->staticSize) { + return ERROR(parameter_unsupported); /* MT not compatible with static alloc */ + } + return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value); + + case ZSTD_p_jobSize: + case ZSTD_p_overlapSizeLog: + return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value); + + case ZSTD_p_enableLongDistanceMatching: + case ZSTD_p_ldmHashLog: + case ZSTD_p_ldmMinMatch: + case ZSTD_p_ldmBucketSizeLog: + case ZSTD_p_ldmHashEveryLog: + if (cctx->cdict) return ERROR(stage_wrong); + return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value); + + default: return ERROR(parameter_unsupported); + } +} + +size_t ZSTD_CCtxParam_setParameter( + ZSTD_CCtx_params* CCtxParams, ZSTD_cParameter param, unsigned value) +{ + DEBUGLOG(4, "ZSTD_CCtxParam_setParameter (%u, %u)", (U32)param, value); + switch(param) + { + case ZSTD_p_format : + if (value > (unsigned)ZSTD_f_zstd1_magicless) + return ERROR(parameter_unsupported); + CCtxParams->format = (ZSTD_format_e)value; + return (size_t)CCtxParams->format; + + case ZSTD_p_compressionLevel : { + int cLevel = (int)value; /* cast expected to restore negative sign */ + if (cLevel > ZSTD_maxCLevel()) cLevel = ZSTD_maxCLevel(); + if (cLevel) { /* 0 : does not change current level */ + CCtxParams->disableLiteralCompression = (cLevel<0); /* negative levels disable huffman */ + CCtxParams->compressionLevel = cLevel; + } + if (CCtxParams->compressionLevel >= 0) return CCtxParams->compressionLevel; + return 0; /* return type (size_t) cannot represent negative values */ + } + + case ZSTD_p_windowLog : + if (value>0) /* 0 => 
use default */ + CLAMPCHECK(value, ZSTD_WINDOWLOG_MIN, ZSTD_WINDOWLOG_MAX); + CCtxParams->cParams.windowLog = value; + return CCtxParams->cParams.windowLog; + + case ZSTD_p_hashLog : + if (value>0) /* 0 => use default */ + CLAMPCHECK(value, ZSTD_HASHLOG_MIN, ZSTD_HASHLOG_MAX); + CCtxParams->cParams.hashLog = value; + return CCtxParams->cParams.hashLog; + + case ZSTD_p_chainLog : + if (value>0) /* 0 => use default */ + CLAMPCHECK(value, ZSTD_CHAINLOG_MIN, ZSTD_CHAINLOG_MAX); + CCtxParams->cParams.chainLog = value; + return CCtxParams->cParams.chainLog; + + case ZSTD_p_searchLog : + if (value>0) /* 0 => use default */ + CLAMPCHECK(value, ZSTD_SEARCHLOG_MIN, ZSTD_SEARCHLOG_MAX); + CCtxParams->cParams.searchLog = value; + return value; + + case ZSTD_p_minMatch : + if (value>0) /* 0 => use default */ + CLAMPCHECK(value, ZSTD_SEARCHLENGTH_MIN, ZSTD_SEARCHLENGTH_MAX); + CCtxParams->cParams.searchLength = value; + return CCtxParams->cParams.searchLength; + + case ZSTD_p_targetLength : + /* all values are valid. 
0 => use default */ + CCtxParams->cParams.targetLength = value; + return CCtxParams->cParams.targetLength; + + case ZSTD_p_compressionStrategy : + if (value>0) /* 0 => use default */ + CLAMPCHECK(value, (unsigned)ZSTD_fast, (unsigned)ZSTD_btultra); + CCtxParams->cParams.strategy = (ZSTD_strategy)value; + return (size_t)CCtxParams->cParams.strategy; + + case ZSTD_p_compressLiterals: + CCtxParams->disableLiteralCompression = !value; + return !CCtxParams->disableLiteralCompression; + + case ZSTD_p_contentSizeFlag : + /* Content size written in frame header _when known_ (default:1) */ + DEBUGLOG(4, "set content size flag = %u", (value>0)); + CCtxParams->fParams.contentSizeFlag = value > 0; + return CCtxParams->fParams.contentSizeFlag; + + case ZSTD_p_checksumFlag : + /* A 32-bits content checksum will be calculated and written at end of frame (default:0) */ + CCtxParams->fParams.checksumFlag = value > 0; + return CCtxParams->fParams.checksumFlag; + + case ZSTD_p_dictIDFlag : /* When applicable, dictionary's dictID is provided in frame header (default:1) */ + DEBUGLOG(4, "set dictIDFlag = %u", (value>0)); + CCtxParams->fParams.noDictIDFlag = !value; + return !CCtxParams->fParams.noDictIDFlag; + + case ZSTD_p_forceMaxWindow : + CCtxParams->forceWindow = (value > 0); + return CCtxParams->forceWindow; + + case ZSTD_p_nbWorkers : +#ifndef ZSTD_MULTITHREAD + if (value>0) return ERROR(parameter_unsupported); + return 0; +#else + return ZSTDMT_CCtxParam_setNbWorkers(CCtxParams, value); +#endif + + case ZSTD_p_jobSize : +#ifndef ZSTD_MULTITHREAD + return ERROR(parameter_unsupported); +#else + return ZSTDMT_CCtxParam_setMTCtxParameter(CCtxParams, ZSTDMT_p_jobSize, value); +#endif + + case ZSTD_p_overlapSizeLog : +#ifndef ZSTD_MULTITHREAD + return ERROR(parameter_unsupported); +#else + return ZSTDMT_CCtxParam_setMTCtxParameter(CCtxParams, ZSTDMT_p_overlapSectionLog, value); +#endif + + case ZSTD_p_enableLongDistanceMatching : + CCtxParams->ldmParams.enableLdm = (value>0); + 
return CCtxParams->ldmParams.enableLdm; + + case ZSTD_p_ldmHashLog : + if (value>0) /* 0 ==> auto */ + CLAMPCHECK(value, ZSTD_HASHLOG_MIN, ZSTD_HASHLOG_MAX); + CCtxParams->ldmParams.hashLog = value; + return CCtxParams->ldmParams.hashLog; + + case ZSTD_p_ldmMinMatch : + if (value>0) /* 0 ==> default */ + CLAMPCHECK(value, ZSTD_LDM_MINMATCH_MIN, ZSTD_LDM_MINMATCH_MAX); + CCtxParams->ldmParams.minMatchLength = value; + return CCtxParams->ldmParams.minMatchLength; + + case ZSTD_p_ldmBucketSizeLog : + if (value > ZSTD_LDM_BUCKETSIZELOG_MAX) + return ERROR(parameter_outOfBound); + CCtxParams->ldmParams.bucketSizeLog = value; + return CCtxParams->ldmParams.bucketSizeLog; + + case ZSTD_p_ldmHashEveryLog : + if (value > ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN) + return ERROR(parameter_outOfBound); + CCtxParams->ldmParams.hashEveryLog = value; + return CCtxParams->ldmParams.hashEveryLog; + + default: return ERROR(parameter_unsupported); + } +} + +/** ZSTD_CCtx_setParametersUsingCCtxParams() : + * just applies `params` into `cctx` + * no action is performed, parameters are merely stored. + * If ZSTDMT is enabled, parameters are pushed to cctx->mtctx. + * This is possible even if a compression is ongoing. + * In which case, new parameters will be applied on the fly, starting with next compression job. 
+ */ +size_t ZSTD_CCtx_setParametersUsingCCtxParams( + ZSTD_CCtx* cctx, const ZSTD_CCtx_params* params) +{ + if (cctx->streamStage != zcss_init) return ERROR(stage_wrong); + if (cctx->cdict) return ERROR(stage_wrong); + + cctx->requestedParams = *params; + return 0; +} + +ZSTDLIB_API size_t ZSTD_CCtx_setPledgedSrcSize(ZSTD_CCtx* cctx, unsigned long long pledgedSrcSize) +{ + DEBUGLOG(4, "ZSTD_CCtx_setPledgedSrcSize to %u bytes", (U32)pledgedSrcSize); + if (cctx->streamStage != zcss_init) return ERROR(stage_wrong); + cctx->pledgedSrcSizePlusOne = pledgedSrcSize+1; + return 0; +} + +size_t ZSTD_CCtx_loadDictionary_advanced( + ZSTD_CCtx* cctx, const void* dict, size_t dictSize, + ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType) +{ + if (cctx->streamStage != zcss_init) return ERROR(stage_wrong); + if (cctx->staticSize) return ERROR(memory_allocation); /* no malloc for static CCtx */ + DEBUGLOG(4, "ZSTD_CCtx_loadDictionary_advanced (size: %u)", (U32)dictSize); + ZSTD_freeCDict(cctx->cdictLocal); /* in case one already exists */ + if (dict==NULL || dictSize==0) { /* no dictionary mode */ + cctx->cdictLocal = NULL; + cctx->cdict = NULL; + } else { + ZSTD_compressionParameters const cParams = + ZSTD_getCParamsFromCCtxParams(&cctx->requestedParams, cctx->pledgedSrcSizePlusOne-1, dictSize); + cctx->cdictLocal = ZSTD_createCDict_advanced( + dict, dictSize, + dictLoadMethod, dictContentType, + cParams, cctx->customMem); + cctx->cdict = cctx->cdictLocal; + if (cctx->cdictLocal == NULL) + return ERROR(memory_allocation); + } + return 0; +} + +ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary_byReference( + ZSTD_CCtx* cctx, const void* dict, size_t dictSize) +{ + return ZSTD_CCtx_loadDictionary_advanced( + cctx, dict, dictSize, ZSTD_dlm_byRef, ZSTD_dct_auto); +} + +ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize) +{ + return ZSTD_CCtx_loadDictionary_advanced( + cctx, dict, dictSize, ZSTD_dlm_byCopy, 
ZSTD_dct_auto); +} + + +size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict) +{ + if (cctx->streamStage != zcss_init) return ERROR(stage_wrong); + cctx->cdict = cdict; + memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict)); /* exclusive */ + return 0; +} + +size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize) +{ + return ZSTD_CCtx_refPrefix_advanced(cctx, prefix, prefixSize, ZSTD_dct_rawContent); +} + +size_t ZSTD_CCtx_refPrefix_advanced( + ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType) +{ + if (cctx->streamStage != zcss_init) return ERROR(stage_wrong); + cctx->cdict = NULL; /* prefix discards any prior cdict */ + cctx->prefixDict.dict = prefix; + cctx->prefixDict.dictSize = prefixSize; + cctx->prefixDict.dictContentType = dictContentType; + return 0; +} + +static void ZSTD_startNewCompression(ZSTD_CCtx* cctx) +{ + cctx->streamStage = zcss_init; + cctx->pledgedSrcSizePlusOne = 0; +} + +/*! ZSTD_CCtx_reset() : + * Also dumps dictionary */ +void ZSTD_CCtx_reset(ZSTD_CCtx* cctx) +{ + ZSTD_startNewCompression(cctx); + cctx->cdict = NULL; +} + +/** ZSTD_checkCParams() : + control CParam values remain within authorized range. 
+ @return : 0, or an error code if one value is beyond authorized range */ +size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams) +{ + CLAMPCHECK(cParams.windowLog, ZSTD_WINDOWLOG_MIN, ZSTD_WINDOWLOG_MAX); + CLAMPCHECK(cParams.chainLog, ZSTD_CHAINLOG_MIN, ZSTD_CHAINLOG_MAX); + CLAMPCHECK(cParams.hashLog, ZSTD_HASHLOG_MIN, ZSTD_HASHLOG_MAX); + CLAMPCHECK(cParams.searchLog, ZSTD_SEARCHLOG_MIN, ZSTD_SEARCHLOG_MAX); + CLAMPCHECK(cParams.searchLength, ZSTD_SEARCHLENGTH_MIN, ZSTD_SEARCHLENGTH_MAX); + if ((U32)(cParams.targetLength) < ZSTD_TARGETLENGTH_MIN) + return ERROR(parameter_unsupported); + if ((U32)(cParams.strategy) > (U32)ZSTD_btultra) + return ERROR(parameter_unsupported); + return 0; +} + +/** ZSTD_clampCParams() : + * make CParam values within valid range. + * @return : valid CParams */ +static ZSTD_compressionParameters ZSTD_clampCParams(ZSTD_compressionParameters cParams) +{ +# define CLAMP(val,min,max) { \ + if (valmax) val=max; \ + } + CLAMP(cParams.windowLog, ZSTD_WINDOWLOG_MIN, ZSTD_WINDOWLOG_MAX); + CLAMP(cParams.chainLog, ZSTD_CHAINLOG_MIN, ZSTD_CHAINLOG_MAX); + CLAMP(cParams.hashLog, ZSTD_HASHLOG_MIN, ZSTD_HASHLOG_MAX); + CLAMP(cParams.searchLog, ZSTD_SEARCHLOG_MIN, ZSTD_SEARCHLOG_MAX); + CLAMP(cParams.searchLength, ZSTD_SEARCHLENGTH_MIN, ZSTD_SEARCHLENGTH_MAX); + if ((U32)(cParams.targetLength) < ZSTD_TARGETLENGTH_MIN) cParams.targetLength = ZSTD_TARGETLENGTH_MIN; + if ((U32)(cParams.strategy) > (U32)ZSTD_btultra) cParams.strategy = ZSTD_btultra; + return cParams; +} + +/** ZSTD_cycleLog() : + * condition for correct operation : hashLog > 1 */ +static U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat) +{ + U32 const btScale = ((U32)strat >= (U32)ZSTD_btlazy2); + return hashLog - btScale; +} + +/** ZSTD_adjustCParams_internal() : + optimize `cPar` for a given input (`srcSize` and `dictSize`). + mostly downsizing to reduce memory consumption and initialization latency. + Both `srcSize` and `dictSize` are optional (use 0 if unknown). 
+ Note : cPar is considered validated at this stage. Use ZSTD_checkCParams() to ensure that condition. */ +ZSTD_compressionParameters ZSTD_adjustCParams_internal(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize) +{ + static const U64 minSrcSize = 513; /* (1<<9) + 1 */ + static const U64 maxWindowResize = 1ULL << (ZSTD_WINDOWLOG_MAX-1); + assert(ZSTD_checkCParams(cPar)==0); + + if (dictSize && (srcSize+1<2) /* srcSize unknown */ ) + srcSize = minSrcSize; /* presumed small when there is a dictionary */ + else if (srcSize == 0) + srcSize = ZSTD_CONTENTSIZE_UNKNOWN; /* 0 == unknown : presumed large */ + + /* resize windowLog if input is small enough, to use less memory */ + if ( (srcSize < maxWindowResize) + && (dictSize < maxWindowResize) ) { + U32 const tSize = (U32)(srcSize + dictSize); + static U32 const hashSizeMin = 1 << ZSTD_HASHLOG_MIN; + U32 const srcLog = (tSize < hashSizeMin) ? ZSTD_HASHLOG_MIN : + ZSTD_highbit32(tSize-1) + 1; + if (cPar.windowLog > srcLog) cPar.windowLog = srcLog; + } + if (cPar.hashLog > cPar.windowLog) cPar.hashLog = cPar.windowLog; + { U32 const cycleLog = ZSTD_cycleLog(cPar.chainLog, cPar.strategy); + if (cycleLog > cPar.windowLog) + cPar.chainLog -= (cycleLog - cPar.windowLog); + } + + if (cPar.windowLog < ZSTD_WINDOWLOG_ABSOLUTEMIN) + cPar.windowLog = ZSTD_WINDOWLOG_ABSOLUTEMIN; /* required for frame header */ + + return cPar; +} + +ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize) +{ + cPar = ZSTD_clampCParams(cPar); + return ZSTD_adjustCParams_internal(cPar, srcSize, dictSize); +} + +static size_t ZSTD_sizeof_matchState(ZSTD_compressionParameters const* cParams, const U32 forCCtx) +{ + size_t const chainSize = (cParams->strategy == ZSTD_fast) ? 0 : ((size_t)1 << cParams->chainLog); + size_t const hSize = ((size_t)1) << cParams->hashLog; + U32 const hashLog3 = (forCCtx && cParams->searchLength==3) ? 
MIN(ZSTD_HASHLOG3_MAX, cParams->windowLog) : 0; + size_t const h3Size = ((size_t)1) << hashLog3; + size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32); + size_t const optPotentialSpace = ((MaxML+1) + (MaxLL+1) + (MaxOff+1) + (1<strategy == ZSTD_btopt) || + (cParams->strategy == ZSTD_btultra))) + ? optPotentialSpace + : 0; + DEBUGLOG(4, "chainSize: %u - hSize: %u - h3Size: %u", + (U32)chainSize, (U32)hSize, (U32)h3Size); + return tableSpace + optSpace; +} + +size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params) +{ + /* Estimate CCtx size is supported for single-threaded compression only. */ + if (params->nbWorkers > 0) { return ERROR(GENERIC); } + { ZSTD_compressionParameters const cParams = + ZSTD_getCParamsFromCCtxParams(params, 0, 0); + size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, (size_t)1 << cParams.windowLog); + U32 const divider = (cParams.searchLength==3) ? 3 : 4; + size_t const maxNbSeq = blockSize / divider; + size_t const tokenSpace = blockSize + 11*maxNbSeq; + size_t const entropySpace = HUF_WORKSPACE_SIZE; + size_t const blockStateSpace = 2 * sizeof(ZSTD_compressedBlockState_t); + size_t const matchStateSize = ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 1); + + size_t const ldmSpace = ZSTD_ldm_getTableSize(params->ldmParams); + size_t const ldmSeqSpace = ZSTD_ldm_getMaxNbSeq(params->ldmParams, blockSize) * sizeof(rawSeq); + + size_t const neededSpace = entropySpace + blockStateSpace + tokenSpace + + matchStateSize + ldmSpace + ldmSeqSpace; + + DEBUGLOG(5, "sizeof(ZSTD_CCtx) : %u", (U32)sizeof(ZSTD_CCtx)); + DEBUGLOG(5, "estimate workSpace : %u", (U32)neededSpace); + return sizeof(ZSTD_CCtx) + neededSpace; + } +} + +size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams) +{ + ZSTD_CCtx_params const params = ZSTD_makeCCtxParamsFromCParams(cParams); + return ZSTD_estimateCCtxSize_usingCCtxParams(¶ms); +} + +static size_t ZSTD_estimateCCtxSize_internal(int compressionLevel) +{ + 
ZSTD_compressionParameters const cParams = ZSTD_getCParams(compressionLevel, 0, 0); + return ZSTD_estimateCCtxSize_usingCParams(cParams); +} + +size_t ZSTD_estimateCCtxSize(int compressionLevel) +{ + int level; + size_t memBudget = 0; + for (level=1; level<=compressionLevel; level++) { + size_t const newMB = ZSTD_estimateCCtxSize_internal(level); + if (newMB > memBudget) memBudget = newMB; + } + return memBudget; +} + +size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params) +{ + if (params->nbWorkers > 0) { return ERROR(GENERIC); } + { size_t const CCtxSize = ZSTD_estimateCCtxSize_usingCCtxParams(params); + size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, (size_t)1 << params->cParams.windowLog); + size_t const inBuffSize = ((size_t)1 << params->cParams.windowLog) + blockSize; + size_t const outBuffSize = ZSTD_compressBound(blockSize) + 1; + size_t const streamingSize = inBuffSize + outBuffSize; + + return CCtxSize + streamingSize; + } +} + +size_t ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParameters cParams) +{ + ZSTD_CCtx_params const params = ZSTD_makeCCtxParamsFromCParams(cParams); + return ZSTD_estimateCStreamSize_usingCCtxParams(¶ms); +} + +static size_t ZSTD_estimateCStreamSize_internal(int compressionLevel) { + ZSTD_compressionParameters const cParams = ZSTD_getCParams(compressionLevel, 0, 0); + return ZSTD_estimateCStreamSize_usingCParams(cParams); +} + +size_t ZSTD_estimateCStreamSize(int compressionLevel) { + int level; + size_t memBudget = 0; + for (level=1; level<=compressionLevel; level++) { + size_t const newMB = ZSTD_estimateCStreamSize_internal(level); + if (newMB > memBudget) memBudget = newMB; + } + return memBudget; +} + +/* ZSTD_getFrameProgression(): + * tells how much data has been consumed (input) and produced (output) for current frame. + * able to count progression inside worker threads (non-blocking mode). 
+ */ +ZSTD_frameProgression ZSTD_getFrameProgression(const ZSTD_CCtx* cctx) +{ +#ifdef ZSTD_MULTITHREAD + if (cctx->appliedParams.nbWorkers > 0) { + return ZSTDMT_getFrameProgression(cctx->mtctx); + } +#endif + { ZSTD_frameProgression fp; + size_t const buffered = (cctx->inBuff == NULL) ? 0 : + cctx->inBuffPos - cctx->inToCompress; + if (buffered) assert(cctx->inBuffPos >= cctx->inToCompress); + assert(buffered <= ZSTD_BLOCKSIZE_MAX); + fp.ingested = cctx->consumedSrcSize + buffered; + fp.consumed = cctx->consumedSrcSize; + fp.produced = cctx->producedCSize; + return fp; +} } + + +static U32 ZSTD_equivalentCParams(ZSTD_compressionParameters cParams1, + ZSTD_compressionParameters cParams2) +{ + return (cParams1.hashLog == cParams2.hashLog) + & (cParams1.chainLog == cParams2.chainLog) + & (cParams1.strategy == cParams2.strategy) /* opt parser space */ + & ((cParams1.searchLength==3) == (cParams2.searchLength==3)); /* hashlog3 space */ +} + +/** The parameters are equivalent if ldm is not enabled in both sets or + * all the parameters are equivalent. */ +static U32 ZSTD_equivalentLdmParams(ldmParams_t ldmParams1, + ldmParams_t ldmParams2) +{ + return (!ldmParams1.enableLdm && !ldmParams2.enableLdm) || + (ldmParams1.enableLdm == ldmParams2.enableLdm && + ldmParams1.hashLog == ldmParams2.hashLog && + ldmParams1.bucketSizeLog == ldmParams2.bucketSizeLog && + ldmParams1.minMatchLength == ldmParams2.minMatchLength && + ldmParams1.hashEveryLog == ldmParams2.hashEveryLog); +} + +typedef enum { ZSTDb_not_buffered, ZSTDb_buffered } ZSTD_buffered_policy_e; + +/* ZSTD_sufficientBuff() : + * check internal buffers exist for streaming if buffPol == ZSTDb_buffered . 
+ * Note : they are assumed to be correctly sized if ZSTD_equivalentCParams()==1 */ +static U32 ZSTD_sufficientBuff(size_t bufferSize1, size_t blockSize1, + ZSTD_buffered_policy_e buffPol2, + ZSTD_compressionParameters cParams2, + U64 pledgedSrcSize) +{ + size_t const windowSize2 = MAX(1, (size_t)MIN(((U64)1 << cParams2.windowLog), pledgedSrcSize)); + size_t const blockSize2 = MIN(ZSTD_BLOCKSIZE_MAX, windowSize2); + size_t const neededBufferSize2 = (buffPol2==ZSTDb_buffered) ? windowSize2 + blockSize2 : 0; + DEBUGLOG(4, "ZSTD_sufficientBuff: is windowSize2=%u <= wlog1=%u", + (U32)windowSize2, cParams2.windowLog); + DEBUGLOG(4, "ZSTD_sufficientBuff: is blockSize2=%u <= blockSize1=%u", + (U32)blockSize2, (U32)blockSize1); + return (blockSize2 <= blockSize1) /* seqStore space depends on blockSize */ + & (neededBufferSize2 <= bufferSize1); +} + +/** Equivalence for resetCCtx purposes */ +static U32 ZSTD_equivalentParams(ZSTD_CCtx_params params1, + ZSTD_CCtx_params params2, + size_t buffSize1, size_t blockSize1, + ZSTD_buffered_policy_e buffPol2, + U64 pledgedSrcSize) +{ + DEBUGLOG(4, "ZSTD_equivalentParams: pledgedSrcSize=%u", (U32)pledgedSrcSize); + return ZSTD_equivalentCParams(params1.cParams, params2.cParams) && + ZSTD_equivalentLdmParams(params1.ldmParams, params2.ldmParams) && + ZSTD_sufficientBuff(buffSize1, blockSize1, buffPol2, params2.cParams, pledgedSrcSize); +} + +static void ZSTD_reset_compressedBlockState(ZSTD_compressedBlockState_t* bs) +{ + int i; + for (i = 0; i < ZSTD_REP_NUM; ++i) + bs->rep[i] = repStartValue[i]; + bs->entropy.hufCTable_repeatMode = HUF_repeat_none; + bs->entropy.offcode_repeatMode = FSE_repeat_none; + bs->entropy.matchlength_repeatMode = FSE_repeat_none; + bs->entropy.litlength_repeatMode = FSE_repeat_none; +} + +/*! ZSTD_invalidateMatchState() + * Invalidate all the matches in the match finder tables. + * Requires nextSrc and base to be set (can be NULL). 
+ */ +static void ZSTD_invalidateMatchState(ZSTD_matchState_t* ms) +{ + ZSTD_window_clear(&ms->window); + + ms->nextToUpdate = ms->window.dictLimit + 1; + ms->loadedDictEnd = 0; + ms->opt.litLengthSum = 0; /* force reset of btopt stats */ +} + +/*! ZSTD_continueCCtx() : + * reuse CCtx without reset (note : requires no dictionary) */ +static size_t ZSTD_continueCCtx(ZSTD_CCtx* cctx, ZSTD_CCtx_params params, U64 pledgedSrcSize) +{ + size_t const windowSize = MAX(1, (size_t)MIN(((U64)1 << params.cParams.windowLog), pledgedSrcSize)); + size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, windowSize); + DEBUGLOG(4, "ZSTD_continueCCtx: re-use context in place"); + + cctx->blockSize = blockSize; /* previous block size could be different even for same windowLog, due to pledgedSrcSize */ + cctx->appliedParams = params; + cctx->pledgedSrcSizePlusOne = pledgedSrcSize+1; + cctx->consumedSrcSize = 0; + cctx->producedCSize = 0; + if (pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN) + cctx->appliedParams.fParams.contentSizeFlag = 0; + DEBUGLOG(4, "pledged content size : %u ; flag : %u", + (U32)pledgedSrcSize, cctx->appliedParams.fParams.contentSizeFlag); + cctx->stage = ZSTDcs_init; + cctx->dictID = 0; + if (params.ldmParams.enableLdm) + ZSTD_window_clear(&cctx->ldmState.window); + ZSTD_referenceExternalSequences(cctx, NULL, 0); + ZSTD_invalidateMatchState(&cctx->blockState.matchState); + ZSTD_reset_compressedBlockState(cctx->blockState.prevCBlock); + XXH64_reset(&cctx->xxhState, 0); + return 0; +} + +typedef enum { ZSTDcrp_continue, ZSTDcrp_noMemset } ZSTD_compResetPolicy_e; + +static void* ZSTD_reset_matchState(ZSTD_matchState_t* ms, void* ptr, ZSTD_compressionParameters const* cParams, ZSTD_compResetPolicy_e const crp, U32 const forCCtx) +{ + size_t const chainSize = (cParams->strategy == ZSTD_fast) ? 0 : ((size_t)1 << cParams->chainLog); + size_t const hSize = ((size_t)1) << cParams->hashLog; + U32 const hashLog3 = (forCCtx && cParams->searchLength==3) ? 
MIN(ZSTD_HASHLOG3_MAX, cParams->windowLog) : 0; + size_t const h3Size = ((size_t)1) << hashLog3; + size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32); + + assert(((size_t)ptr & 3) == 0); + + ms->hashLog3 = hashLog3; + memset(&ms->window, 0, sizeof(ms->window)); + ZSTD_invalidateMatchState(ms); + + /* opt parser space */ + if (forCCtx && ((cParams->strategy == ZSTD_btopt) | (cParams->strategy == ZSTD_btultra))) { + DEBUGLOG(4, "reserving optimal parser space"); + ms->opt.litFreq = (U32*)ptr; + ms->opt.litLengthFreq = ms->opt.litFreq + (1<opt.matchLengthFreq = ms->opt.litLengthFreq + (MaxLL+1); + ms->opt.offCodeFreq = ms->opt.matchLengthFreq + (MaxML+1); + ptr = ms->opt.offCodeFreq + (MaxOff+1); + ms->opt.matchTable = (ZSTD_match_t*)ptr; + ptr = ms->opt.matchTable + ZSTD_OPT_NUM+1; + ms->opt.priceTable = (ZSTD_optimal_t*)ptr; + ptr = ms->opt.priceTable + ZSTD_OPT_NUM+1; + } + + /* table Space */ + DEBUGLOG(4, "reset table : %u", crp!=ZSTDcrp_noMemset); + assert(((size_t)ptr & 3) == 0); /* ensure ptr is properly aligned */ + if (crp!=ZSTDcrp_noMemset) memset(ptr, 0, tableSpace); /* reset tables only */ + ms->hashTable = (U32*)(ptr); + ms->chainTable = ms->hashTable + hSize; + ms->hashTable3 = ms->chainTable + chainSize; + ptr = ms->hashTable3 + h3Size; + + assert(((size_t)ptr & 3) == 0); + return ptr; +} + +/*! 
ZSTD_resetCCtx_internal() : + note : `params` are assumed fully validated at this stage */ +static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc, + ZSTD_CCtx_params params, U64 pledgedSrcSize, + ZSTD_compResetPolicy_e const crp, + ZSTD_buffered_policy_e const zbuff) +{ + DEBUGLOG(4, "ZSTD_resetCCtx_internal: pledgedSrcSize=%u, wlog=%u", + (U32)pledgedSrcSize, params.cParams.windowLog); + assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams))); + + if (crp == ZSTDcrp_continue) { + if (ZSTD_equivalentParams(zc->appliedParams, params, + zc->inBuffSize, zc->blockSize, + zbuff, pledgedSrcSize)) { + DEBUGLOG(4, "ZSTD_equivalentParams()==1 -> continue mode (wLog1=%u, blockSize1=%u)", + zc->appliedParams.cParams.windowLog, (U32)zc->blockSize); + return ZSTD_continueCCtx(zc, params, pledgedSrcSize); + } } + DEBUGLOG(4, "ZSTD_equivalentParams()==0 -> reset CCtx"); + + if (params.ldmParams.enableLdm) { + /* Adjust long distance matching parameters */ + params.ldmParams.windowLog = params.cParams.windowLog; + ZSTD_ldm_adjustParameters(¶ms.ldmParams, ¶ms.cParams); + assert(params.ldmParams.hashLog >= params.ldmParams.bucketSizeLog); + assert(params.ldmParams.hashEveryLog < 32); + zc->ldmState.hashPower = + ZSTD_ldm_getHashPower(params.ldmParams.minMatchLength); + } + + { size_t const windowSize = MAX(1, (size_t)MIN(((U64)1 << params.cParams.windowLog), pledgedSrcSize)); + size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, windowSize); + U32 const divider = (params.cParams.searchLength==3) ? 3 : 4; + size_t const maxNbSeq = blockSize / divider; + size_t const tokenSpace = blockSize + 11*maxNbSeq; + size_t const buffOutSize = (zbuff==ZSTDb_buffered) ? ZSTD_compressBound(blockSize)+1 : 0; + size_t const buffInSize = (zbuff==ZSTDb_buffered) ? 
windowSize + blockSize : 0; + size_t const matchStateSize = ZSTD_sizeof_matchState(¶ms.cParams, /* forCCtx */ 1); + size_t const maxNbLdmSeq = ZSTD_ldm_getMaxNbSeq(params.ldmParams, blockSize); + void* ptr; + + /* Check if workSpace is large enough, alloc a new one if needed */ + { size_t const entropySpace = HUF_WORKSPACE_SIZE; + size_t const blockStateSpace = 2 * sizeof(ZSTD_compressedBlockState_t); + size_t const bufferSpace = buffInSize + buffOutSize; + size_t const ldmSpace = ZSTD_ldm_getTableSize(params.ldmParams); + size_t const ldmSeqSpace = maxNbLdmSeq * sizeof(rawSeq); + + size_t const neededSpace = entropySpace + blockStateSpace + ldmSpace + + ldmSeqSpace + matchStateSize + tokenSpace + + bufferSpace; + DEBUGLOG(4, "Need %uKB workspace, including %uKB for match state, and %uKB for buffers", + (U32)(neededSpace>>10), (U32)(matchStateSize>>10), (U32)(bufferSpace>>10)); + DEBUGLOG(4, "windowSize: %u - blockSize: %u", (U32)windowSize, (U32)blockSize); + + if (zc->workSpaceSize < neededSpace) { /* too small : resize */ + DEBUGLOG(4, "Need to update workSpaceSize from %uK to %uK", + (unsigned)(zc->workSpaceSize>>10), + (unsigned)(neededSpace>>10)); + /* static cctx : no resize, error out */ + if (zc->staticSize) return ERROR(memory_allocation); + + zc->workSpaceSize = 0; + ZSTD_free(zc->workSpace, zc->customMem); + zc->workSpace = ZSTD_malloc(neededSpace, zc->customMem); + if (zc->workSpace == NULL) return ERROR(memory_allocation); + zc->workSpaceSize = neededSpace; + ptr = zc->workSpace; + + /* Statically sized space. 
entropyWorkspace never moves (but prev/next block swap places) */ + assert(((size_t)zc->workSpace & 3) == 0); /* ensure correct alignment */ + assert(zc->workSpaceSize >= 2 * sizeof(ZSTD_compressedBlockState_t)); + zc->blockState.prevCBlock = (ZSTD_compressedBlockState_t*)zc->workSpace; + zc->blockState.nextCBlock = zc->blockState.prevCBlock + 1; + ptr = zc->blockState.nextCBlock + 1; + zc->entropyWorkspace = (U32*)ptr; + } } + + /* init params */ + zc->appliedParams = params; + zc->pledgedSrcSizePlusOne = pledgedSrcSize+1; + zc->consumedSrcSize = 0; + zc->producedCSize = 0; + if (pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN) + zc->appliedParams.fParams.contentSizeFlag = 0; + DEBUGLOG(4, "pledged content size : %u ; flag : %u", + (U32)pledgedSrcSize, zc->appliedParams.fParams.contentSizeFlag); + zc->blockSize = blockSize; + + XXH64_reset(&zc->xxhState, 0); + zc->stage = ZSTDcs_init; + zc->dictID = 0; + + ZSTD_reset_compressedBlockState(zc->blockState.prevCBlock); + + ptr = zc->entropyWorkspace + HUF_WORKSPACE_SIZE_U32; + + /* ldm hash table */ + /* initialize bucketOffsets table later for pointer alignment */ + if (params.ldmParams.enableLdm) { + size_t const ldmHSize = ((size_t)1) << params.ldmParams.hashLog; + memset(ptr, 0, ldmHSize * sizeof(ldmEntry_t)); + assert(((size_t)ptr & 3) == 0); /* ensure ptr is properly aligned */ + zc->ldmState.hashTable = (ldmEntry_t*)ptr; + ptr = zc->ldmState.hashTable + ldmHSize; + zc->ldmSequences = (rawSeq*)ptr; + ptr = zc->ldmSequences + maxNbLdmSeq; + zc->maxNbLdmSequences = maxNbLdmSeq; + + memset(&zc->ldmState.window, 0, sizeof(zc->ldmState.window)); + } + assert(((size_t)ptr & 3) == 0); /* ensure ptr is properly aligned */ + + ptr = ZSTD_reset_matchState(&zc->blockState.matchState, ptr, ¶ms.cParams, crp, /* forCCtx */ 1); + + /* sequences storage */ + zc->seqStore.sequencesStart = (seqDef*)ptr; + ptr = zc->seqStore.sequencesStart + maxNbSeq; + zc->seqStore.llCode = (BYTE*) ptr; + zc->seqStore.mlCode = zc->seqStore.llCode + 
maxNbSeq; + zc->seqStore.ofCode = zc->seqStore.mlCode + maxNbSeq; + zc->seqStore.litStart = zc->seqStore.ofCode + maxNbSeq; + ptr = zc->seqStore.litStart + blockSize; + + /* ldm bucketOffsets table */ + if (params.ldmParams.enableLdm) { + size_t const ldmBucketSize = + ((size_t)1) << (params.ldmParams.hashLog - + params.ldmParams.bucketSizeLog); + memset(ptr, 0, ldmBucketSize); + zc->ldmState.bucketOffsets = (BYTE*)ptr; + ptr = zc->ldmState.bucketOffsets + ldmBucketSize; + ZSTD_window_clear(&zc->ldmState.window); + } + ZSTD_referenceExternalSequences(zc, NULL, 0); + + /* buffers */ + zc->inBuffSize = buffInSize; + zc->inBuff = (char*)ptr; + zc->outBuffSize = buffOutSize; + zc->outBuff = zc->inBuff + buffInSize; + + return 0; + } +} + +/* ZSTD_invalidateRepCodes() : + * ensures next compression will not use repcodes from previous block. + * Note : only works with regular variant; + * do not use with extDict variant ! */ +void ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx) { + int i; + for (i=0; iblockState.prevCBlock->rep[i] = 0; + assert(!ZSTD_window_hasExtDict(cctx->blockState.matchState.window)); +} + +static size_t ZSTD_resetCCtx_usingCDict(ZSTD_CCtx* cctx, + const ZSTD_CDict* cdict, + unsigned windowLog, + ZSTD_frameParameters fParams, + U64 pledgedSrcSize, + ZSTD_buffered_policy_e zbuff) +{ + { ZSTD_CCtx_params params = cctx->requestedParams; + /* Copy only compression parameters related to tables. */ + params.cParams = cdict->cParams; + if (windowLog) params.cParams.windowLog = windowLog; + params.fParams = fParams; + ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize, + ZSTDcrp_noMemset, zbuff); + assert(cctx->appliedParams.cParams.strategy == cdict->cParams.strategy); + assert(cctx->appliedParams.cParams.hashLog == cdict->cParams.hashLog); + assert(cctx->appliedParams.cParams.chainLog == cdict->cParams.chainLog); + } + + /* copy tables */ + { size_t const chainSize = (cdict->cParams.strategy == ZSTD_fast) ? 
0 : ((size_t)1 << cdict->cParams.chainLog); + size_t const hSize = (size_t)1 << cdict->cParams.hashLog; + size_t const tableSpace = (chainSize + hSize) * sizeof(U32); + assert((U32*)cctx->blockState.matchState.chainTable == (U32*)cctx->blockState.matchState.hashTable + hSize); /* chainTable must follow hashTable */ + assert((U32*)cctx->blockState.matchState.hashTable3 == (U32*)cctx->blockState.matchState.chainTable + chainSize); + assert((U32*)cdict->matchState.chainTable == (U32*)cdict->matchState.hashTable + hSize); /* chainTable must follow hashTable */ + assert((U32*)cdict->matchState.hashTable3 == (U32*)cdict->matchState.chainTable + chainSize); + memcpy(cctx->blockState.matchState.hashTable, cdict->matchState.hashTable, tableSpace); /* presumes all tables follow each other */ + } + /* Zero the hashTable3, since the cdict never fills it */ + { size_t const h3Size = (size_t)1 << cctx->blockState.matchState.hashLog3; + assert(cdict->matchState.hashLog3 == 0); + memset(cctx->blockState.matchState.hashTable3, 0, h3Size * sizeof(U32)); + } + + /* copy dictionary offsets */ + { + ZSTD_matchState_t const* srcMatchState = &cdict->matchState; + ZSTD_matchState_t* dstMatchState = &cctx->blockState.matchState; + dstMatchState->window = srcMatchState->window; + dstMatchState->nextToUpdate = srcMatchState->nextToUpdate; + dstMatchState->nextToUpdate3= srcMatchState->nextToUpdate3; + dstMatchState->loadedDictEnd= srcMatchState->loadedDictEnd; + } + cctx->dictID = cdict->dictID; + + /* copy block state */ + memcpy(cctx->blockState.prevCBlock, &cdict->cBlockState, sizeof(cdict->cBlockState)); + + return 0; +} + +/*! ZSTD_copyCCtx_internal() : + * Duplicate an existing context `srcCCtx` into another one `dstCCtx`. + * Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()). + * The "context", in this case, refers to the hash and chain tables, + * entropy tables, and dictionary references. 
+ * `windowLog` value is enforced if != 0, otherwise value is copied from srcCCtx. + * @return : 0, or an error code */ +static size_t ZSTD_copyCCtx_internal(ZSTD_CCtx* dstCCtx, + const ZSTD_CCtx* srcCCtx, + ZSTD_frameParameters fParams, + U64 pledgedSrcSize, + ZSTD_buffered_policy_e zbuff) +{ + DEBUGLOG(5, "ZSTD_copyCCtx_internal"); + if (srcCCtx->stage!=ZSTDcs_init) return ERROR(stage_wrong); + + memcpy(&dstCCtx->customMem, &srcCCtx->customMem, sizeof(ZSTD_customMem)); + { ZSTD_CCtx_params params = dstCCtx->requestedParams; + /* Copy only compression parameters related to tables. */ + params.cParams = srcCCtx->appliedParams.cParams; + params.fParams = fParams; + ZSTD_resetCCtx_internal(dstCCtx, params, pledgedSrcSize, + ZSTDcrp_noMemset, zbuff); + assert(dstCCtx->appliedParams.cParams.windowLog == srcCCtx->appliedParams.cParams.windowLog); + assert(dstCCtx->appliedParams.cParams.strategy == srcCCtx->appliedParams.cParams.strategy); + assert(dstCCtx->appliedParams.cParams.hashLog == srcCCtx->appliedParams.cParams.hashLog); + assert(dstCCtx->appliedParams.cParams.chainLog == srcCCtx->appliedParams.cParams.chainLog); + assert(dstCCtx->blockState.matchState.hashLog3 == srcCCtx->blockState.matchState.hashLog3); + } + + /* copy tables */ + { size_t const chainSize = (srcCCtx->appliedParams.cParams.strategy == ZSTD_fast) ? 
0 : ((size_t)1 << srcCCtx->appliedParams.cParams.chainLog); + size_t const hSize = (size_t)1 << srcCCtx->appliedParams.cParams.hashLog; + size_t const h3Size = (size_t)1 << srcCCtx->blockState.matchState.hashLog3; + size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32); + assert((U32*)dstCCtx->blockState.matchState.chainTable == (U32*)dstCCtx->blockState.matchState.hashTable + hSize); /* chainTable must follow hashTable */ + assert((U32*)dstCCtx->blockState.matchState.hashTable3 == (U32*)dstCCtx->blockState.matchState.chainTable + chainSize); + memcpy(dstCCtx->blockState.matchState.hashTable, srcCCtx->blockState.matchState.hashTable, tableSpace); /* presumes all tables follow each other */ + } + + /* copy dictionary offsets */ + { + ZSTD_matchState_t const* srcMatchState = &srcCCtx->blockState.matchState; + ZSTD_matchState_t* dstMatchState = &dstCCtx->blockState.matchState; + dstMatchState->window = srcMatchState->window; + dstMatchState->nextToUpdate = srcMatchState->nextToUpdate; + dstMatchState->nextToUpdate3= srcMatchState->nextToUpdate3; + dstMatchState->loadedDictEnd= srcMatchState->loadedDictEnd; + } + dstCCtx->dictID = srcCCtx->dictID; + + /* copy block state */ + memcpy(dstCCtx->blockState.prevCBlock, srcCCtx->blockState.prevCBlock, sizeof(*srcCCtx->blockState.prevCBlock)); + + return 0; +} + +/*! ZSTD_copyCCtx() : + * Duplicate an existing context `srcCCtx` into another one `dstCCtx`. + * Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()). + * pledgedSrcSize==0 means "unknown". 
+* @return : 0, or an error code */ +size_t ZSTD_copyCCtx(ZSTD_CCtx* dstCCtx, const ZSTD_CCtx* srcCCtx, unsigned long long pledgedSrcSize) +{ + ZSTD_frameParameters fParams = { 1 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ }; + ZSTD_buffered_policy_e const zbuff = (ZSTD_buffered_policy_e)(srcCCtx->inBuffSize>0); + ZSTD_STATIC_ASSERT((U32)ZSTDb_buffered==1); + if (pledgedSrcSize==0) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN; + fParams.contentSizeFlag = (pledgedSrcSize != ZSTD_CONTENTSIZE_UNKNOWN); + + return ZSTD_copyCCtx_internal(dstCCtx, srcCCtx, + fParams, pledgedSrcSize, + zbuff); +} + + +#define ZSTD_ROWSIZE 16 +/*! ZSTD_reduceTable() : + * reduce table indexes by `reducerValue`, or squash to zero. + * PreserveMark preserves "unsorted mark" for btlazy2 strategy. + * It must be set to a clear 0/1 value, to remove branch during inlining. + * Presume table size is a multiple of ZSTD_ROWSIZE + * to help auto-vectorization */ +FORCE_INLINE_TEMPLATE void +ZSTD_reduceTable_internal (U32* const table, U32 const size, U32 const reducerValue, int const preserveMark) +{ + int const nbRows = (int)size / ZSTD_ROWSIZE; + int cellNb = 0; + int rowNb; + assert((size & (ZSTD_ROWSIZE-1)) == 0); /* multiple of ZSTD_ROWSIZE */ + assert(size < (1U<<31)); /* can be casted to int */ + for (rowNb=0 ; rowNb < nbRows ; rowNb++) { + int column; + for (column=0; columnblockState.matchState; + { U32 const hSize = (U32)1 << zc->appliedParams.cParams.hashLog; + ZSTD_reduceTable(ms->hashTable, hSize, reducerValue); + } + + if (zc->appliedParams.cParams.strategy != ZSTD_fast) { + U32 const chainSize = (U32)1 << zc->appliedParams.cParams.chainLog; + if (zc->appliedParams.cParams.strategy == ZSTD_btlazy2) + ZSTD_reduceTable_btlazy2(ms->chainTable, chainSize, reducerValue); + else + ZSTD_reduceTable(ms->chainTable, chainSize, reducerValue); + } + + if (ms->hashLog3) { + U32 const h3Size = (U32)1 << ms->hashLog3; + ZSTD_reduceTable(ms->hashTable3, h3Size, reducerValue); + } +} + + 
+/*-******************************************************* +* Block entropic compression +*********************************************************/ + +/* See doc/zstd_compression_format.md for detailed format description */ + +size_t ZSTD_noCompressBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize) +{ + if (srcSize + ZSTD_blockHeaderSize > dstCapacity) return ERROR(dstSize_tooSmall); + memcpy((BYTE*)dst + ZSTD_blockHeaderSize, src, srcSize); + MEM_writeLE24(dst, (U32)(srcSize << 2) + (U32)bt_raw); + return ZSTD_blockHeaderSize+srcSize; +} + + +static size_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void* src, size_t srcSize) +{ + BYTE* const ostart = (BYTE* const)dst; + U32 const flSize = 1 + (srcSize>31) + (srcSize>4095); + + if (srcSize + flSize > dstCapacity) return ERROR(dstSize_tooSmall); + + switch(flSize) + { + case 1: /* 2 - 1 - 5 */ + ostart[0] = (BYTE)((U32)set_basic + (srcSize<<3)); + break; + case 2: /* 2 - 2 - 12 */ + MEM_writeLE16(ostart, (U16)((U32)set_basic + (1<<2) + (srcSize<<4))); + break; + case 3: /* 2 - 2 - 20 */ + MEM_writeLE32(ostart, (U32)((U32)set_basic + (3<<2) + (srcSize<<4))); + break; + default: /* not necessary : flSize is {1,2,3} */ + assert(0); + } + + memcpy(ostart + flSize, src, srcSize); + return srcSize + flSize; +} + +static size_t ZSTD_compressRleLiteralsBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize) +{ + BYTE* const ostart = (BYTE* const)dst; + U32 const flSize = 1 + (srcSize>31) + (srcSize>4095); + + (void)dstCapacity; /* dstCapacity already guaranteed to be >=4, hence large enough */ + + switch(flSize) + { + case 1: /* 2 - 1 - 5 */ + ostart[0] = (BYTE)((U32)set_rle + (srcSize<<3)); + break; + case 2: /* 2 - 2 - 12 */ + MEM_writeLE16(ostart, (U16)((U32)set_rle + (1<<2) + (srcSize<<4))); + break; + case 3: /* 2 - 2 - 20 */ + MEM_writeLE32(ostart, (U32)((U32)set_rle + (3<<2) + (srcSize<<4))); + break; + default: /* not necessary : flSize is {1,2,3} */ + 
assert(0); + } + + ostart[flSize] = *(const BYTE*)src; + return flSize+1; +} + + +static size_t ZSTD_minGain(size_t srcSize) { return (srcSize >> 6) + 2; } + +static size_t ZSTD_compressLiterals (ZSTD_entropyCTables_t const* prevEntropy, + ZSTD_entropyCTables_t* nextEntropy, + ZSTD_strategy strategy, int disableLiteralCompression, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + U32* workspace, const int bmi2) +{ + size_t const minGain = ZSTD_minGain(srcSize); + size_t const lhSize = 3 + (srcSize >= 1 KB) + (srcSize >= 16 KB); + BYTE* const ostart = (BYTE*)dst; + U32 singleStream = srcSize < 256; + symbolEncodingType_e hType = set_compressed; + size_t cLitSize; + + DEBUGLOG(5,"ZSTD_compressLiterals (disableLiteralCompression=%i)", + disableLiteralCompression); + + /* Prepare nextEntropy assuming reusing the existing table */ + nextEntropy->hufCTable_repeatMode = prevEntropy->hufCTable_repeatMode; + memcpy(nextEntropy->hufCTable, prevEntropy->hufCTable, + sizeof(prevEntropy->hufCTable)); + + if (disableLiteralCompression) + return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize); + + /* small ? don't even attempt compression (speed opt) */ +# define COMPRESS_LITERALS_SIZE_MIN 63 + { size_t const minLitSize = (prevEntropy->hufCTable_repeatMode == HUF_repeat_valid) ? 6 : COMPRESS_LITERALS_SIZE_MIN; + if (srcSize <= minLitSize) return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize); + } + + if (dstCapacity < lhSize+1) return ERROR(dstSize_tooSmall); /* not enough space for compression */ + { HUF_repeat repeat = prevEntropy->hufCTable_repeatMode; + int const preferRepeat = strategy < ZSTD_lazy ? srcSize <= 1024 : 0; + if (repeat == HUF_repeat_valid && lhSize == 3) singleStream = 1; + cLitSize = singleStream ? 
HUF_compress1X_repeat(ostart+lhSize, dstCapacity-lhSize, src, srcSize, 255, 11, + workspace, HUF_WORKSPACE_SIZE, (HUF_CElt*)nextEntropy->hufCTable, &repeat, preferRepeat, bmi2) + : HUF_compress4X_repeat(ostart+lhSize, dstCapacity-lhSize, src, srcSize, 255, 11, + workspace, HUF_WORKSPACE_SIZE, (HUF_CElt*)nextEntropy->hufCTable, &repeat, preferRepeat, bmi2); + if (repeat != HUF_repeat_none) { + /* reused the existing table */ + hType = set_repeat; + } + } + + if ((cLitSize==0) | (cLitSize >= srcSize - minGain) | ERR_isError(cLitSize)) { + memcpy(nextEntropy->hufCTable, prevEntropy->hufCTable, sizeof(prevEntropy->hufCTable)); + return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize); + } + if (cLitSize==1) { + memcpy(nextEntropy->hufCTable, prevEntropy->hufCTable, sizeof(prevEntropy->hufCTable)); + return ZSTD_compressRleLiteralsBlock(dst, dstCapacity, src, srcSize); + } + + if (hType == set_compressed) { + /* using a newly constructed table */ + nextEntropy->hufCTable_repeatMode = HUF_repeat_check; + } + + /* Build header */ + switch(lhSize) + { + case 3: /* 2 - 2 - 10 - 10 */ + { U32 const lhc = hType + ((!singleStream) << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<14); + MEM_writeLE24(ostart, lhc); + break; + } + case 4: /* 2 - 2 - 14 - 14 */ + { U32 const lhc = hType + (2 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<18); + MEM_writeLE32(ostart, lhc); + break; + } + case 5: /* 2 - 2 - 18 - 18 */ + { U32 const lhc = hType + (3 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<22); + MEM_writeLE32(ostart, lhc); + ostart[4] = (BYTE)(cLitSize >> 10); + break; + } + default: /* not possible : lhSize is {3,4,5} */ + assert(0); + } + return lhSize+cLitSize; +} + + +void ZSTD_seqToCodes(const seqStore_t* seqStorePtr) +{ + const seqDef* const sequences = seqStorePtr->sequencesStart; + BYTE* const llCodeTable = seqStorePtr->llCode; + BYTE* const ofCodeTable = seqStorePtr->ofCode; + BYTE* const mlCodeTable = seqStorePtr->mlCode; + U32 const nbSeq = (U32)(seqStorePtr->sequences 
- seqStorePtr->sequencesStart); + U32 u; + for (u=0; ulongLengthID==1) + llCodeTable[seqStorePtr->longLengthPos] = MaxLL; + if (seqStorePtr->longLengthID==2) + mlCodeTable[seqStorePtr->longLengthPos] = MaxML; +} + +typedef enum { + ZSTD_defaultDisallowed = 0, + ZSTD_defaultAllowed = 1 +} ZSTD_defaultPolicy_e; + +MEM_STATIC +symbolEncodingType_e ZSTD_selectEncodingType( + FSE_repeat* repeatMode, size_t const mostFrequent, size_t nbSeq, + U32 defaultNormLog, ZSTD_defaultPolicy_e const isDefaultAllowed) +{ +#define MIN_SEQ_FOR_DYNAMIC_FSE 64 +#define MAX_SEQ_FOR_STATIC_FSE 1000 + ZSTD_STATIC_ASSERT(ZSTD_defaultDisallowed == 0 && ZSTD_defaultAllowed != 0); + if ((mostFrequent == nbSeq) && (!isDefaultAllowed || nbSeq > 2)) { + DEBUGLOG(5, "Selected set_rle"); + /* Prefer set_basic over set_rle when there are 2 or less symbols, + * since RLE uses 1 byte, but set_basic uses 5-6 bits per symbol. + * If basic encoding isn't possible, always choose RLE. + */ + *repeatMode = FSE_repeat_check; + return set_rle; + } + if ( isDefaultAllowed + && (*repeatMode == FSE_repeat_valid) && (nbSeq < MAX_SEQ_FOR_STATIC_FSE)) { + DEBUGLOG(5, "Selected set_repeat"); + return set_repeat; + } + if ( isDefaultAllowed + && ((nbSeq < MIN_SEQ_FOR_DYNAMIC_FSE) || (mostFrequent < (nbSeq >> (defaultNormLog-1)))) ) { + DEBUGLOG(5, "Selected set_basic"); + /* The format allows default tables to be repeated, but it isn't useful. + * When using simple heuristics to select encoding type, we don't want + * to confuse these tables with dictionaries. When running more careful + * analysis, we don't need to waste time checking both repeating tables + * and default tables. 
+ */ + *repeatMode = FSE_repeat_none; + return set_basic; + } + DEBUGLOG(5, "Selected set_compressed"); + *repeatMode = FSE_repeat_check; + return set_compressed; +} + +MEM_STATIC +size_t ZSTD_buildCTable(void* dst, size_t dstCapacity, + FSE_CTable* nextCTable, U32 FSELog, symbolEncodingType_e type, + U32* count, U32 max, + BYTE const* codeTable, size_t nbSeq, + S16 const* defaultNorm, U32 defaultNormLog, U32 defaultMax, + FSE_CTable const* prevCTable, size_t prevCTableSize, + void* workspace, size_t workspaceSize) +{ + BYTE* op = (BYTE*)dst; + BYTE const* const oend = op + dstCapacity; + + switch (type) { + case set_rle: + *op = codeTable[0]; + CHECK_F(FSE_buildCTable_rle(nextCTable, (BYTE)max)); + return 1; + case set_repeat: + memcpy(nextCTable, prevCTable, prevCTableSize); + return 0; + case set_basic: + CHECK_F(FSE_buildCTable_wksp(nextCTable, defaultNorm, defaultMax, defaultNormLog, workspace, workspaceSize)); /* note : could be pre-calculated */ + return 0; + case set_compressed: { + S16 norm[MaxSeq + 1]; + size_t nbSeq_1 = nbSeq; + const U32 tableLog = FSE_optimalTableLog(FSELog, nbSeq, max); + if (count[codeTable[nbSeq-1]] > 1) { + count[codeTable[nbSeq-1]]--; + nbSeq_1--; + } + assert(nbSeq_1 > 1); + CHECK_F(FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max)); + { size_t const NCountSize = FSE_writeNCount(op, oend - op, norm, max, tableLog); /* overflow protected */ + if (FSE_isError(NCountSize)) return NCountSize; + CHECK_F(FSE_buildCTable_wksp(nextCTable, norm, max, tableLog, workspace, workspaceSize)); + return NCountSize; + } + } + default: return assert(0), ERROR(GENERIC); + } +} + +FORCE_INLINE_TEMPLATE size_t +ZSTD_encodeSequences_body( + void* dst, size_t dstCapacity, + FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable, + FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable, + FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable, + seqDef const* sequences, size_t nbSeq, int longOffsets) +{ + BIT_CStream_t 
blockStream; + FSE_CState_t stateMatchLength; + FSE_CState_t stateOffsetBits; + FSE_CState_t stateLitLength; + + CHECK_E(BIT_initCStream(&blockStream, dst, dstCapacity), dstSize_tooSmall); /* not enough space remaining */ + + /* first symbols */ + FSE_initCState2(&stateMatchLength, CTable_MatchLength, mlCodeTable[nbSeq-1]); + FSE_initCState2(&stateOffsetBits, CTable_OffsetBits, ofCodeTable[nbSeq-1]); + FSE_initCState2(&stateLitLength, CTable_LitLength, llCodeTable[nbSeq-1]); + BIT_addBits(&blockStream, sequences[nbSeq-1].litLength, LL_bits[llCodeTable[nbSeq-1]]); + if (MEM_32bits()) BIT_flushBits(&blockStream); + BIT_addBits(&blockStream, sequences[nbSeq-1].matchLength, ML_bits[mlCodeTable[nbSeq-1]]); + if (MEM_32bits()) BIT_flushBits(&blockStream); + if (longOffsets) { + U32 const ofBits = ofCodeTable[nbSeq-1]; + int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1); + if (extraBits) { + BIT_addBits(&blockStream, sequences[nbSeq-1].offset, extraBits); + BIT_flushBits(&blockStream); + } + BIT_addBits(&blockStream, sequences[nbSeq-1].offset >> extraBits, + ofBits - extraBits); + } else { + BIT_addBits(&blockStream, sequences[nbSeq-1].offset, ofCodeTable[nbSeq-1]); + } + BIT_flushBits(&blockStream); + + { size_t n; + for (n=nbSeq-2 ; n= 64-7-(LLFSELog+MLFSELog+OffFSELog))) + BIT_flushBits(&blockStream); /* (7)*/ + BIT_addBits(&blockStream, sequences[n].litLength, llBits); + if (MEM_32bits() && ((llBits+mlBits)>24)) BIT_flushBits(&blockStream); + BIT_addBits(&blockStream, sequences[n].matchLength, mlBits); + if (MEM_32bits() || (ofBits+mlBits+llBits > 56)) BIT_flushBits(&blockStream); + if (longOffsets) { + int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1); + if (extraBits) { + BIT_addBits(&blockStream, sequences[n].offset, extraBits); + BIT_flushBits(&blockStream); /* (7)*/ + } + BIT_addBits(&blockStream, sequences[n].offset >> extraBits, + ofBits - extraBits); /* 31 */ + } else { + BIT_addBits(&blockStream, sequences[n].offset, 
ofBits); /* 31 */ + } + BIT_flushBits(&blockStream); /* (7)*/ + } } + + DEBUGLOG(6, "ZSTD_encodeSequences: flushing ML state with %u bits", stateMatchLength.stateLog); + FSE_flushCState(&blockStream, &stateMatchLength); + DEBUGLOG(6, "ZSTD_encodeSequences: flushing Off state with %u bits", stateOffsetBits.stateLog); + FSE_flushCState(&blockStream, &stateOffsetBits); + DEBUGLOG(6, "ZSTD_encodeSequences: flushing LL state with %u bits", stateLitLength.stateLog); + FSE_flushCState(&blockStream, &stateLitLength); + + { size_t const streamSize = BIT_closeCStream(&blockStream); + if (streamSize==0) return ERROR(dstSize_tooSmall); /* not enough space */ + return streamSize; + } +} + +static size_t +ZSTD_encodeSequences_default( + void* dst, size_t dstCapacity, + FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable, + FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable, + FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable, + seqDef const* sequences, size_t nbSeq, int longOffsets) +{ + return ZSTD_encodeSequences_body(dst, dstCapacity, + CTable_MatchLength, mlCodeTable, + CTable_OffsetBits, ofCodeTable, + CTable_LitLength, llCodeTable, + sequences, nbSeq, longOffsets); +} + + +#if DYNAMIC_BMI2 + +static TARGET_ATTRIBUTE("bmi2") size_t +ZSTD_encodeSequences_bmi2( + void* dst, size_t dstCapacity, + FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable, + FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable, + FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable, + seqDef const* sequences, size_t nbSeq, int longOffsets) +{ + return ZSTD_encodeSequences_body(dst, dstCapacity, + CTable_MatchLength, mlCodeTable, + CTable_OffsetBits, ofCodeTable, + CTable_LitLength, llCodeTable, + sequences, nbSeq, longOffsets); +} + +#endif + +size_t ZSTD_encodeSequences( + void* dst, size_t dstCapacity, + FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable, + FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable, + FSE_CTable 
const* CTable_LitLength, BYTE const* llCodeTable,
+    seqDef const* sequences, size_t nbSeq, int longOffsets, int bmi2)
+{
+#if DYNAMIC_BMI2
+    if (bmi2) {
+        return ZSTD_encodeSequences_bmi2(dst, dstCapacity,
+                                         CTable_MatchLength, mlCodeTable,
+                                         CTable_OffsetBits, ofCodeTable,
+                                         CTable_LitLength, llCodeTable,
+                                         sequences, nbSeq, longOffsets);
+    }
+#endif
+    (void)bmi2;
+    return ZSTD_encodeSequences_default(dst, dstCapacity,
+                                        CTable_MatchLength, mlCodeTable,
+                                        CTable_OffsetBits, ofCodeTable,
+                                        CTable_LitLength, llCodeTable,
+                                        sequences, nbSeq, longOffsets);
+}
+
+MEM_STATIC size_t ZSTD_compressSequences_internal(seqStore_t* seqStorePtr,
+                              ZSTD_entropyCTables_t const* prevEntropy,
+                              ZSTD_entropyCTables_t* nextEntropy,
+                              ZSTD_CCtx_params const* cctxParams,
+                              void* dst, size_t dstCapacity, U32* workspace,
+                              const int bmi2)
+{
+    const int longOffsets = cctxParams->cParams.windowLog > STREAM_ACCUMULATOR_MIN;
+    U32 count[MaxSeq+1];
+    FSE_CTable* CTable_LitLength = nextEntropy->litlengthCTable;
+    FSE_CTable* CTable_OffsetBits = nextEntropy->offcodeCTable;
+    FSE_CTable* CTable_MatchLength = nextEntropy->matchlengthCTable;
+    U32 LLtype, Offtype, MLtype;   /* compressed, raw or rle */
+    const seqDef* const sequences = seqStorePtr->sequencesStart;
+    const BYTE* const ofCodeTable = seqStorePtr->ofCode;
+    const BYTE* const llCodeTable = seqStorePtr->llCode;
+    const BYTE* const mlCodeTable = seqStorePtr->mlCode;
+    BYTE* const ostart = (BYTE*)dst;
+    BYTE* const oend = ostart + dstCapacity;
+    BYTE* op = ostart;
+    size_t const nbSeq = seqStorePtr->sequences - seqStorePtr->sequencesStart;
+    BYTE* seqHead;
+
+    ZSTD_STATIC_ASSERT(HUF_WORKSPACE_SIZE >= (1<<MAX(MLFSELog,LLFSELog)));
+
+    /* Compress literals */
+    {   const BYTE* const literals = seqStorePtr->litStart;
+        size_t const litSize = seqStorePtr->lit - literals;
+        size_t const cSize = ZSTD_compressLiterals(
+                                    prevEntropy, nextEntropy,
+                                    cctxParams->cParams.strategy, cctxParams->disableLiteralCompression,
+                                    op, dstCapacity,
+                                    literals, litSize,
+                                    workspace, bmi2);
+        if (ZSTD_isError(cSize))
+          return cSize;
+        assert(cSize <=
dstCapacity); + op += cSize; + } + + /* Sequences Header */ + if ((oend-op) < 3 /*max nbSeq Size*/ + 1 /*seqHead*/) return ERROR(dstSize_tooSmall); + if (nbSeq < 0x7F) + *op++ = (BYTE)nbSeq; + else if (nbSeq < LONGNBSEQ) + op[0] = (BYTE)((nbSeq>>8) + 0x80), op[1] = (BYTE)nbSeq, op+=2; + else + op[0]=0xFF, MEM_writeLE16(op+1, (U16)(nbSeq - LONGNBSEQ)), op+=3; + if (nbSeq==0) { + memcpy(nextEntropy->litlengthCTable, prevEntropy->litlengthCTable, sizeof(prevEntropy->litlengthCTable)); + nextEntropy->litlength_repeatMode = prevEntropy->litlength_repeatMode; + memcpy(nextEntropy->offcodeCTable, prevEntropy->offcodeCTable, sizeof(prevEntropy->offcodeCTable)); + nextEntropy->offcode_repeatMode = prevEntropy->offcode_repeatMode; + memcpy(nextEntropy->matchlengthCTable, prevEntropy->matchlengthCTable, sizeof(prevEntropy->matchlengthCTable)); + nextEntropy->matchlength_repeatMode = prevEntropy->matchlength_repeatMode; + return op - ostart; + } + + /* seqHead : flags for FSE encoding type */ + seqHead = op++; + + /* convert length/distances into codes */ + ZSTD_seqToCodes(seqStorePtr); + /* build CTable for Literal Lengths */ + { U32 max = MaxLL; + size_t const mostFrequent = FSE_countFast_wksp(count, &max, llCodeTable, nbSeq, workspace); + DEBUGLOG(5, "Building LL table"); + nextEntropy->litlength_repeatMode = prevEntropy->litlength_repeatMode; + LLtype = ZSTD_selectEncodingType(&nextEntropy->litlength_repeatMode, mostFrequent, nbSeq, LL_defaultNormLog, ZSTD_defaultAllowed); + { size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_LitLength, LLFSELog, (symbolEncodingType_e)LLtype, + count, max, llCodeTable, nbSeq, LL_defaultNorm, LL_defaultNormLog, MaxLL, + prevEntropy->litlengthCTable, sizeof(prevEntropy->litlengthCTable), + workspace, HUF_WORKSPACE_SIZE); + if (ZSTD_isError(countSize)) return countSize; + op += countSize; + } } + /* build CTable for Offsets */ + { U32 max = MaxOff; + size_t const mostFrequent = FSE_countFast_wksp(count, &max, ofCodeTable, nbSeq, 
workspace); + /* We can only use the basic table if max <= DefaultMaxOff, otherwise the offsets are too large */ + ZSTD_defaultPolicy_e const defaultPolicy = (max <= DefaultMaxOff) ? ZSTD_defaultAllowed : ZSTD_defaultDisallowed; + DEBUGLOG(5, "Building OF table"); + nextEntropy->offcode_repeatMode = prevEntropy->offcode_repeatMode; + Offtype = ZSTD_selectEncodingType(&nextEntropy->offcode_repeatMode, mostFrequent, nbSeq, OF_defaultNormLog, defaultPolicy); + { size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_OffsetBits, OffFSELog, (symbolEncodingType_e)Offtype, + count, max, ofCodeTable, nbSeq, OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff, + prevEntropy->offcodeCTable, sizeof(prevEntropy->offcodeCTable), + workspace, HUF_WORKSPACE_SIZE); + if (ZSTD_isError(countSize)) return countSize; + op += countSize; + } } + /* build CTable for MatchLengths */ + { U32 max = MaxML; + size_t const mostFrequent = FSE_countFast_wksp(count, &max, mlCodeTable, nbSeq, workspace); + DEBUGLOG(5, "Building ML table"); + nextEntropy->matchlength_repeatMode = prevEntropy->matchlength_repeatMode; + MLtype = ZSTD_selectEncodingType(&nextEntropy->matchlength_repeatMode, mostFrequent, nbSeq, ML_defaultNormLog, ZSTD_defaultAllowed); + { size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_MatchLength, MLFSELog, (symbolEncodingType_e)MLtype, + count, max, mlCodeTable, nbSeq, ML_defaultNorm, ML_defaultNormLog, MaxML, + prevEntropy->matchlengthCTable, sizeof(prevEntropy->matchlengthCTable), + workspace, HUF_WORKSPACE_SIZE); + if (ZSTD_isError(countSize)) return countSize; + op += countSize; + } } + + *seqHead = (BYTE)((LLtype<<6) + (Offtype<<4) + (MLtype<<2)); + + { size_t const bitstreamSize = ZSTD_encodeSequences( + op, oend - op, + CTable_MatchLength, mlCodeTable, + CTable_OffsetBits, ofCodeTable, + CTable_LitLength, llCodeTable, + sequences, nbSeq, + longOffsets, bmi2); + if (ZSTD_isError(bitstreamSize)) return bitstreamSize; + op += bitstreamSize; + } + + return 
op - ostart; +} + +MEM_STATIC size_t ZSTD_compressSequences(seqStore_t* seqStorePtr, + ZSTD_entropyCTables_t const* prevEntropy, + ZSTD_entropyCTables_t* nextEntropy, + ZSTD_CCtx_params const* cctxParams, + void* dst, size_t dstCapacity, + size_t srcSize, U32* workspace, int bmi2) +{ + size_t const cSize = ZSTD_compressSequences_internal( + seqStorePtr, prevEntropy, nextEntropy, cctxParams, dst, dstCapacity, + workspace, bmi2); + /* When srcSize <= dstCapacity, there is enough space to write a raw uncompressed block. + * Since we ran out of space, block must be not compressible, so fall back to raw uncompressed block. + */ + if ((cSize == ERROR(dstSize_tooSmall)) & (srcSize <= dstCapacity)) + return 0; /* block not compressed */ + if (ZSTD_isError(cSize)) return cSize; + + /* Check compressibility */ + { size_t const maxCSize = srcSize - ZSTD_minGain(srcSize); /* note : fixed formula, maybe should depend on compression level, or strategy */ + if (cSize >= maxCSize) return 0; /* block not compressed */ + } + + /* We check that dictionaries have offset codes available for the first + * block. After the first block, the offcode table might not have large + * enough codes to represent the offsets in the data. 
+ */ + if (nextEntropy->offcode_repeatMode == FSE_repeat_valid) + nextEntropy->offcode_repeatMode = FSE_repeat_check; + + return cSize; +} + +/* ZSTD_selectBlockCompressor() : + * Not static, but internal use only (used by long distance matcher) + * assumption : strat is a valid strategy */ +ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, int extDict) +{ + static const ZSTD_blockCompressor blockCompressor[2][(unsigned)ZSTD_btultra+1] = { + { ZSTD_compressBlock_fast /* default for 0 */, + ZSTD_compressBlock_fast, ZSTD_compressBlock_doubleFast, ZSTD_compressBlock_greedy, + ZSTD_compressBlock_lazy, ZSTD_compressBlock_lazy2, ZSTD_compressBlock_btlazy2, + ZSTD_compressBlock_btopt, ZSTD_compressBlock_btultra }, + { ZSTD_compressBlock_fast_extDict /* default for 0 */, + ZSTD_compressBlock_fast_extDict, ZSTD_compressBlock_doubleFast_extDict, ZSTD_compressBlock_greedy_extDict, + ZSTD_compressBlock_lazy_extDict,ZSTD_compressBlock_lazy2_extDict, ZSTD_compressBlock_btlazy2_extDict, + ZSTD_compressBlock_btopt_extDict, ZSTD_compressBlock_btultra_extDict } + }; + ZSTD_STATIC_ASSERT((unsigned)ZSTD_fast == 1); + + assert((U32)strat >= (U32)ZSTD_fast); + assert((U32)strat <= (U32)ZSTD_btultra); + return blockCompressor[extDict!=0][(U32)strat]; +} + +static void ZSTD_storeLastLiterals(seqStore_t* seqStorePtr, + const BYTE* anchor, size_t lastLLSize) +{ + memcpy(seqStorePtr->lit, anchor, lastLLSize); + seqStorePtr->lit += lastLLSize; +} + +static void ZSTD_resetSeqStore(seqStore_t* ssPtr) +{ + ssPtr->lit = ssPtr->litStart; + ssPtr->sequences = ssPtr->sequencesStart; + ssPtr->longLengthID = 0; +} + +static size_t ZSTD_compressBlock_internal(ZSTD_CCtx* zc, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize) +{ + ZSTD_matchState_t* const ms = &zc->blockState.matchState; + DEBUGLOG(5, "ZSTD_compressBlock_internal (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u)", + (U32)dstCapacity, ms->window.dictLimit, ms->nextToUpdate); + if (srcSize < 
MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1) { + ZSTD_ldm_skipSequences(&zc->externSeqStore, srcSize, zc->appliedParams.cParams.searchLength); + return 0; /* don't even attempt compression below a certain srcSize */ + } + ZSTD_resetSeqStore(&(zc->seqStore)); + + /* limited update after a very long match */ + { const BYTE* const base = ms->window.base; + const BYTE* const istart = (const BYTE*)src; + const U32 current = (U32)(istart-base); + if (current > ms->nextToUpdate + 384) + ms->nextToUpdate = current - MIN(192, (U32)(current - ms->nextToUpdate - 384)); + } + + /* select and store sequences */ + { U32 const extDict = ZSTD_window_hasExtDict(ms->window); + size_t lastLLSize; + { int i; + for (i = 0; i < ZSTD_REP_NUM; ++i) + zc->blockState.nextCBlock->rep[i] = zc->blockState.prevCBlock->rep[i]; + } + if (zc->externSeqStore.pos < zc->externSeqStore.size) { + assert(!zc->appliedParams.ldmParams.enableLdm); + /* Updates ldmSeqStore.pos */ + lastLLSize = + ZSTD_ldm_blockCompress(&zc->externSeqStore, + ms, &zc->seqStore, + zc->blockState.nextCBlock->rep, + &zc->appliedParams.cParams, + src, srcSize, extDict); + assert(zc->externSeqStore.pos <= zc->externSeqStore.size); + } else if (zc->appliedParams.ldmParams.enableLdm) { + rawSeqStore_t ldmSeqStore = {NULL, 0, 0, 0}; + + ldmSeqStore.seq = zc->ldmSequences; + ldmSeqStore.capacity = zc->maxNbLdmSequences; + /* Updates ldmSeqStore.size */ + CHECK_F(ZSTD_ldm_generateSequences(&zc->ldmState, &ldmSeqStore, + &zc->appliedParams.ldmParams, + src, srcSize)); + /* Updates ldmSeqStore.pos */ + lastLLSize = + ZSTD_ldm_blockCompress(&ldmSeqStore, + ms, &zc->seqStore, + zc->blockState.nextCBlock->rep, + &zc->appliedParams.cParams, + src, srcSize, extDict); + assert(ldmSeqStore.pos == ldmSeqStore.size); + } else { /* not long range mode */ + ZSTD_blockCompressor const blockCompressor = ZSTD_selectBlockCompressor(zc->appliedParams.cParams.strategy, extDict); + lastLLSize = blockCompressor(ms, &zc->seqStore, 
zc->blockState.nextCBlock->rep, &zc->appliedParams.cParams, src, srcSize); + } + { const BYTE* const lastLiterals = (const BYTE*)src + srcSize - lastLLSize; + ZSTD_storeLastLiterals(&zc->seqStore, lastLiterals, lastLLSize); + } } + + /* encode sequences and literals */ + { size_t const cSize = ZSTD_compressSequences(&zc->seqStore, + &zc->blockState.prevCBlock->entropy, &zc->blockState.nextCBlock->entropy, + &zc->appliedParams, + dst, dstCapacity, + srcSize, zc->entropyWorkspace, zc->bmi2); + if (ZSTD_isError(cSize) || cSize == 0) return cSize; + /* confirm repcodes and entropy tables */ + { ZSTD_compressedBlockState_t* const tmp = zc->blockState.prevCBlock; + zc->blockState.prevCBlock = zc->blockState.nextCBlock; + zc->blockState.nextCBlock = tmp; + } + return cSize; + } +} + + +/*! ZSTD_compress_frameChunk() : +* Compress a chunk of data into one or multiple blocks. +* All blocks will be terminated, all input will be consumed. +* Function will issue an error if there is not enough `dstCapacity` to hold the compressed content. 
+* Frame is supposed already started (header already produced) +* @return : compressed size, or an error code +*/ +static size_t ZSTD_compress_frameChunk (ZSTD_CCtx* cctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + U32 lastFrameChunk) +{ + size_t blockSize = cctx->blockSize; + size_t remaining = srcSize; + const BYTE* ip = (const BYTE*)src; + BYTE* const ostart = (BYTE*)dst; + BYTE* op = ostart; + U32 const maxDist = (U32)1 << cctx->appliedParams.cParams.windowLog; + assert(cctx->appliedParams.cParams.windowLog <= 31); + + DEBUGLOG(5, "ZSTD_compress_frameChunk (blockSize=%u)", (U32)blockSize); + if (cctx->appliedParams.fParams.checksumFlag && srcSize) + XXH64_update(&cctx->xxhState, src, srcSize); + + while (remaining) { + ZSTD_matchState_t* const ms = &cctx->blockState.matchState; + U32 const lastBlock = lastFrameChunk & (blockSize >= remaining); + + if (dstCapacity < ZSTD_blockHeaderSize + MIN_CBLOCK_SIZE) + return ERROR(dstSize_tooSmall); /* not enough space to store compressed block */ + if (remaining < blockSize) blockSize = remaining; + + if (ZSTD_window_needOverflowCorrection(ms->window, ip + blockSize)) { + U32 const cycleLog = ZSTD_cycleLog(cctx->appliedParams.cParams.chainLog, cctx->appliedParams.cParams.strategy); + U32 const correction = ZSTD_window_correctOverflow(&ms->window, cycleLog, maxDist, ip); + ZSTD_STATIC_ASSERT(ZSTD_CHAINLOG_MAX <= 30); + ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX_32 <= 30); + ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX <= 31); + + ZSTD_reduceIndex(cctx, correction); + if (ms->nextToUpdate < correction) ms->nextToUpdate = 0; + else ms->nextToUpdate -= correction; + ms->loadedDictEnd = 0; + } + ZSTD_window_enforceMaxDist(&ms->window, ip + blockSize, maxDist, &ms->loadedDictEnd); + if (ms->nextToUpdate < ms->window.lowLimit) ms->nextToUpdate = ms->window.lowLimit; + + { size_t cSize = ZSTD_compressBlock_internal(cctx, + op+ZSTD_blockHeaderSize, dstCapacity-ZSTD_blockHeaderSize, + ip, blockSize); + if 
(ZSTD_isError(cSize)) return cSize; + + if (cSize == 0) { /* block is not compressible */ + U32 const cBlockHeader24 = lastBlock + (((U32)bt_raw)<<1) + (U32)(blockSize << 3); + if (blockSize + ZSTD_blockHeaderSize > dstCapacity) return ERROR(dstSize_tooSmall); + MEM_writeLE32(op, cBlockHeader24); /* 4th byte will be overwritten */ + memcpy(op + ZSTD_blockHeaderSize, ip, blockSize); + cSize = ZSTD_blockHeaderSize + blockSize; + } else { + U32 const cBlockHeader24 = lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3); + MEM_writeLE24(op, cBlockHeader24); + cSize += ZSTD_blockHeaderSize; + } + + ip += blockSize; + assert(remaining >= blockSize); + remaining -= blockSize; + op += cSize; + assert(dstCapacity >= cSize); + dstCapacity -= cSize; + DEBUGLOG(5, "ZSTD_compress_frameChunk: adding a block of size %u", + (U32)cSize); + } } + + if (lastFrameChunk && (op>ostart)) cctx->stage = ZSTDcs_ending; + return op-ostart; +} + + +static size_t ZSTD_writeFrameHeader(void* dst, size_t dstCapacity, + ZSTD_CCtx_params params, U64 pledgedSrcSize, U32 dictID) +{ BYTE* const op = (BYTE*)dst; + U32 const dictIDSizeCodeLength = (dictID>0) + (dictID>=256) + (dictID>=65536); /* 0-3 */ + U32 const dictIDSizeCode = params.fParams.noDictIDFlag ? 0 : dictIDSizeCodeLength; /* 0-3 */ + U32 const checksumFlag = params.fParams.checksumFlag>0; + U32 const windowSize = (U32)1 << params.cParams.windowLog; + U32 const singleSegment = params.fParams.contentSizeFlag && (windowSize >= pledgedSrcSize); + BYTE const windowLogByte = (BYTE)((params.cParams.windowLog - ZSTD_WINDOWLOG_ABSOLUTEMIN) << 3); + U32 const fcsCode = params.fParams.contentSizeFlag ? 
+                     (pledgedSrcSize>=256) + (pledgedSrcSize>=65536+256) + (pledgedSrcSize>=0xFFFFFFFFU) : 0;  /* 0-3 */
+    BYTE  const frameHeaderDecriptionByte = (BYTE)(dictIDSizeCode + (checksumFlag<<2) + (singleSegment<<5) + (fcsCode<<6) );
+    size_t pos=0;
+
+    if (dstCapacity < ZSTD_frameHeaderSize_max) return ERROR(dstSize_tooSmall);
+    DEBUGLOG(4, "ZSTD_writeFrameHeader : dictIDFlag : %u ; dictID : %u ; dictIDSizeCode : %u",
+                !params.fParams.noDictIDFlag, dictID,  dictIDSizeCode);
+
+    if (params.format == ZSTD_f_zstd1) {
+        MEM_writeLE32(dst, ZSTD_MAGICNUMBER);
+        pos = 4;
+    }
+    op[pos++] = frameHeaderDecriptionByte;
+    if (!singleSegment) op[pos++] = windowLogByte;
+    switch(dictIDSizeCode)
+    {
+        default:  assert(0); /* impossible */
+        case 0 : break;
+        case 1 : op[pos] = (BYTE)(dictID); pos++; break;
+        case 2 : MEM_writeLE16(op+pos, (U16)dictID); pos+=2; break;
+        case 3 : MEM_writeLE32(op+pos, dictID); pos+=4; break;
+    }
+    switch(fcsCode)
+    {
+        default:  assert(0); /* impossible */
+        case 0 : if (singleSegment) op[pos++] = (BYTE)(pledgedSrcSize); break;
+        case 1 : MEM_writeLE16(op+pos, (U16)(pledgedSrcSize-256)); pos+=2; break;
+        case 2 : MEM_writeLE32(op+pos, (U32)(pledgedSrcSize)); pos+=4; break;
+        case 3 : MEM_writeLE64(op+pos, (U64)(pledgedSrcSize)); pos+=8; break;
+    }
+    return pos;
+}
+
+/* ZSTD_writeLastEmptyBlock() :
+ * output an empty Block with end-of-frame mark to complete a frame
+ * @return : size of data written into `dst` (== ZSTD_blockHeaderSize (defined in zstd_internal.h))
+ *           or an error code if `dstCapcity` is too small (<ZSTD_blockHeaderSize)
+ */
+size_t ZSTD_writeLastEmptyBlock(void* dst, size_t dstCapacity)
+{
+    if (dstCapacity < ZSTD_blockHeaderSize) return ERROR(dstSize_tooSmall);
+    {   U32 const cBlockHeader24 = 1 /*lastBlock*/ + (((U32)bt_raw)<<1);  /* 0 size */
+        MEM_writeLE24(dst, cBlockHeader24);
+        return ZSTD_blockHeaderSize;
+    }
+}
+
+size_t ZSTD_referenceExternalSequences(ZSTD_CCtx* cctx, rawSeq* seq, size_t nbSeq)
+{
+    if (cctx->stage != ZSTDcs_init)
+        return ERROR(stage_wrong);
+    if (cctx->appliedParams.ldmParams.enableLdm)
+        return ERROR(parameter_unsupported);
+    cctx->externSeqStore.seq = seq;
+    cctx->externSeqStore.size = nbSeq;
+    cctx->externSeqStore.capacity = nbSeq;
+    cctx->externSeqStore.pos = 0;
+    return 0;
+}
+
+
+static size_t ZSTD_compressContinue_internal (ZSTD_CCtx* cctx,
+                              void* dst, size_t dstCapacity,
+                              const void* src, size_t srcSize,
+                               U32 frame, U32
lastFrameChunk) +{ + ZSTD_matchState_t* ms = &cctx->blockState.matchState; + size_t fhSize = 0; + + DEBUGLOG(5, "ZSTD_compressContinue_internal, stage: %u, srcSize: %u", + cctx->stage, (U32)srcSize); + if (cctx->stage==ZSTDcs_created) return ERROR(stage_wrong); /* missing init (ZSTD_compressBegin) */ + + if (frame && (cctx->stage==ZSTDcs_init)) { + fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, cctx->appliedParams, + cctx->pledgedSrcSizePlusOne-1, cctx->dictID); + if (ZSTD_isError(fhSize)) return fhSize; + dstCapacity -= fhSize; + dst = (char*)dst + fhSize; + cctx->stage = ZSTDcs_ongoing; + } + + if (!srcSize) return fhSize; /* do not generate an empty block if no input */ + + if (!ZSTD_window_update(&ms->window, src, srcSize)) { + ms->nextToUpdate = ms->window.dictLimit; + } + if (cctx->appliedParams.ldmParams.enableLdm) + ZSTD_window_update(&cctx->ldmState.window, src, srcSize); + + DEBUGLOG(5, "ZSTD_compressContinue_internal (blockSize=%u)", (U32)cctx->blockSize); + { size_t const cSize = frame ? 
+ ZSTD_compress_frameChunk (cctx, dst, dstCapacity, src, srcSize, lastFrameChunk) : + ZSTD_compressBlock_internal (cctx, dst, dstCapacity, src, srcSize); + if (ZSTD_isError(cSize)) return cSize; + cctx->consumedSrcSize += srcSize; + cctx->producedCSize += (cSize + fhSize); + if (cctx->appliedParams.fParams.contentSizeFlag) { /* control src size */ + if (cctx->consumedSrcSize+1 > cctx->pledgedSrcSizePlusOne) { + DEBUGLOG(4, "error : pledgedSrcSize = %u, while realSrcSize >= %u", + (U32)cctx->pledgedSrcSizePlusOne-1, (U32)cctx->consumedSrcSize); + return ERROR(srcSize_wrong); + } + } + return cSize + fhSize; + } +} + +size_t ZSTD_compressContinue (ZSTD_CCtx* cctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize) +{ + DEBUGLOG(5, "ZSTD_compressContinue (srcSize=%u)", (U32)srcSize); + return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1 /* frame mode */, 0 /* last chunk */); +} + + +size_t ZSTD_getBlockSize(const ZSTD_CCtx* cctx) +{ + ZSTD_compressionParameters const cParams = cctx->appliedParams.cParams; + assert(!ZSTD_checkCParams(cParams)); + return MIN (ZSTD_BLOCKSIZE_MAX, (U32)1 << cParams.windowLog); +} + +size_t ZSTD_compressBlock(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize) +{ + size_t const blockSizeMax = ZSTD_getBlockSize(cctx); + if (srcSize > blockSizeMax) return ERROR(srcSize_wrong); + return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 0 /* frame mode */, 0 /* last chunk */); +} + +/*! ZSTD_loadDictionaryContent() : + * @return : 0, or an error code + */ +static size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms, ZSTD_CCtx_params const* params, const void* src, size_t srcSize) +{ + const BYTE* const ip = (const BYTE*) src; + const BYTE* const iend = ip + srcSize; + ZSTD_compressionParameters const* cParams = ¶ms->cParams; + + ZSTD_window_update(&ms->window, src, srcSize); + ms->loadedDictEnd = params->forceWindow ? 
0 : (U32)(iend - ms->window.base); + + if (srcSize <= HASH_READ_SIZE) return 0; + + switch(params->cParams.strategy) + { + case ZSTD_fast: + ZSTD_fillHashTable(ms, cParams, iend); + break; + case ZSTD_dfast: + ZSTD_fillDoubleHashTable(ms, cParams, iend); + break; + + case ZSTD_greedy: + case ZSTD_lazy: + case ZSTD_lazy2: + if (srcSize >= HASH_READ_SIZE) + ZSTD_insertAndFindFirstIndex(ms, cParams, iend-HASH_READ_SIZE); + break; + + case ZSTD_btlazy2: /* we want the dictionary table fully sorted */ + case ZSTD_btopt: + case ZSTD_btultra: + if (srcSize >= HASH_READ_SIZE) + ZSTD_updateTree(ms, cParams, iend-HASH_READ_SIZE, iend); + break; + + default: + assert(0); /* not possible : not a valid strategy id */ + } + + ms->nextToUpdate = (U32)(iend - ms->window.base); + return 0; +} + + +/* Dictionaries that assign zero probability to symbols that show up causes problems + when FSE encoding. Refuse dictionaries that assign zero probability to symbols + that we may encounter during compression. + NOTE: This behavior is not standard and could be improved in the future. */ +static size_t ZSTD_checkDictNCount(short* normalizedCounter, unsigned dictMaxSymbolValue, unsigned maxSymbolValue) { + U32 s; + if (dictMaxSymbolValue < maxSymbolValue) return ERROR(dictionary_corrupted); + for (s = 0; s <= maxSymbolValue; ++s) { + if (normalizedCounter[s] == 0) return ERROR(dictionary_corrupted); + } + return 0; +} + + +/* Dictionary format : + * See : + * https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format + */ +/*! 
ZSTD_loadZstdDictionary() :
+ * @return : dictID, or an error code
+ *  assumptions : magic number supposed already checked
+ *                dictSize supposed > 8
+ */
+static size_t ZSTD_loadZstdDictionary(ZSTD_compressedBlockState_t* bs, ZSTD_matchState_t* ms, ZSTD_CCtx_params const* params, const void* dict, size_t dictSize, void* workspace)
+{
+    const BYTE* dictPtr = (const BYTE*)dict;
+    const BYTE* const dictEnd = dictPtr + dictSize;
+    short offcodeNCount[MaxOff+1];
+    unsigned offcodeMaxValue = MaxOff;
+    size_t dictID;
+
+    ZSTD_STATIC_ASSERT(HUF_WORKSPACE_SIZE >= (1<<MAX(MLFSELog,LLFSELog)));
+
+    dictPtr += 4;   /* skip magic number */
+    dictID = params->fParams.noDictIDFlag ? 0 :  MEM_readLE32(dictPtr);
+    dictPtr += 4;
+
+    {   unsigned maxSymbolValue = 255;
+        size_t const hufHeaderSize = HUF_readCTable((HUF_CElt*)bs->entropy.hufCTable, &maxSymbolValue, dictPtr, dictEnd-dictPtr);
+        if (HUF_isError(hufHeaderSize)) return ERROR(dictionary_corrupted);
+        if (maxSymbolValue < 255) return ERROR(dictionary_corrupted);
+        dictPtr += hufHeaderSize;
+    }
+
+    {   unsigned offcodeLog;
+        size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr);
+        if (FSE_isError(offcodeHeaderSize)) return ERROR(dictionary_corrupted);
+        if (offcodeLog > OffFSELog) return ERROR(dictionary_corrupted);
+        /* Defer checking offcodeMaxValue because we need to know the size of the dictionary content */
+        CHECK_E( FSE_buildCTable_wksp(bs->entropy.offcodeCTable, offcodeNCount, offcodeMaxValue, offcodeLog, workspace, HUF_WORKSPACE_SIZE),
+                 dictionary_corrupted);
+        dictPtr += offcodeHeaderSize;
+    }
+
+    {   short matchlengthNCount[MaxML+1];
+        unsigned matchlengthMaxValue = MaxML, matchlengthLog;
+        size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd-dictPtr);
+        if (FSE_isError(matchlengthHeaderSize)) return ERROR(dictionary_corrupted);
+        if (matchlengthLog > MLFSELog) return ERROR(dictionary_corrupted);
+        /* Every match length code must have non-zero probability */
+        CHECK_F(
ZSTD_checkDictNCount(matchlengthNCount, matchlengthMaxValue, MaxML)); + CHECK_E( FSE_buildCTable_wksp(bs->entropy.matchlengthCTable, matchlengthNCount, matchlengthMaxValue, matchlengthLog, workspace, HUF_WORKSPACE_SIZE), + dictionary_corrupted); + dictPtr += matchlengthHeaderSize; + } + + { short litlengthNCount[MaxLL+1]; + unsigned litlengthMaxValue = MaxLL, litlengthLog; + size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr); + if (FSE_isError(litlengthHeaderSize)) return ERROR(dictionary_corrupted); + if (litlengthLog > LLFSELog) return ERROR(dictionary_corrupted); + /* Every literal length code must have non-zero probability */ + CHECK_F( ZSTD_checkDictNCount(litlengthNCount, litlengthMaxValue, MaxLL)); + CHECK_E( FSE_buildCTable_wksp(bs->entropy.litlengthCTable, litlengthNCount, litlengthMaxValue, litlengthLog, workspace, HUF_WORKSPACE_SIZE), + dictionary_corrupted); + dictPtr += litlengthHeaderSize; + } + + if (dictPtr+12 > dictEnd) return ERROR(dictionary_corrupted); + bs->rep[0] = MEM_readLE32(dictPtr+0); + bs->rep[1] = MEM_readLE32(dictPtr+4); + bs->rep[2] = MEM_readLE32(dictPtr+8); + dictPtr += 12; + + { size_t const dictContentSize = (size_t)(dictEnd - dictPtr); + U32 offcodeMax = MaxOff; + if (dictContentSize <= ((U32)-1) - 128 KB) { + U32 const maxOffset = (U32)dictContentSize + 128 KB; /* The maximum offset that must be supported */ + offcodeMax = ZSTD_highbit32(maxOffset); /* Calculate minimum offset code required to represent maxOffset */ + } + /* All offset values <= dictContentSize + 128 KB must be representable */ + CHECK_F (ZSTD_checkDictNCount(offcodeNCount, offcodeMaxValue, MIN(offcodeMax, MaxOff))); + /* All repCodes must be <= dictContentSize and != 0*/ + { U32 u; + for (u=0; u<3; u++) { + if (bs->rep[u] == 0) return ERROR(dictionary_corrupted); + if (bs->rep[u] > dictContentSize) return ERROR(dictionary_corrupted); + } } + + bs->entropy.hufCTable_repeatMode = 
HUF_repeat_valid; + bs->entropy.offcode_repeatMode = FSE_repeat_valid; + bs->entropy.matchlength_repeatMode = FSE_repeat_valid; + bs->entropy.litlength_repeatMode = FSE_repeat_valid; + CHECK_F(ZSTD_loadDictionaryContent(ms, params, dictPtr, dictContentSize)); + return dictID; + } +} + +/** ZSTD_compress_insertDictionary() : +* @return : dictID, or an error code */ +static size_t ZSTD_compress_insertDictionary(ZSTD_compressedBlockState_t* bs, ZSTD_matchState_t* ms, + ZSTD_CCtx_params const* params, + const void* dict, size_t dictSize, + ZSTD_dictContentType_e dictContentType, + void* workspace) +{ + DEBUGLOG(4, "ZSTD_compress_insertDictionary (dictSize=%u)", (U32)dictSize); + if ((dict==NULL) || (dictSize<=8)) return 0; + + ZSTD_reset_compressedBlockState(bs); + + /* dict restricted modes */ + if (dictContentType == ZSTD_dct_rawContent) + return ZSTD_loadDictionaryContent(ms, params, dict, dictSize); + + if (MEM_readLE32(dict) != ZSTD_MAGIC_DICTIONARY) { + if (dictContentType == ZSTD_dct_auto) { + DEBUGLOG(4, "raw content dictionary detected"); + return ZSTD_loadDictionaryContent(ms, params, dict, dictSize); + } + if (dictContentType == ZSTD_dct_fullDict) + return ERROR(dictionary_wrong); + assert(0); /* impossible */ + } + + /* dict as full zstd dictionary */ + return ZSTD_loadZstdDictionary(bs, ms, params, dict, dictSize, workspace); +} + +/*! 
/*! ZSTD_compressBegin_internal() :
 *  Start a new frame on `cctx`. Dictionary content may come from raw `dict`
 *  bytes or from a digested `cdict`, never both (asserted below).
 * @return : 0, or an error code */
size_t ZSTD_compressBegin_internal(ZSTD_CCtx* cctx,
                             const void* dict, size_t dictSize,
                             ZSTD_dictContentType_e dictContentType,
                             const ZSTD_CDict* cdict,
                             ZSTD_CCtx_params params, U64 pledgedSrcSize,
                             ZSTD_buffered_policy_e zbuff)
{
    DEBUGLOG(4, "ZSTD_compressBegin_internal: wlog=%u", params.cParams.windowLog);
    /* params are supposed to be fully validated at this point */
    assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
    assert(!((dict) && (cdict)));  /* either dict or cdict, not both */

    /* fast path : reuse the digested dictionary directly
     * (only when it actually carries content) */
    if (cdict && cdict->dictContentSize>0) {
        cctx->requestedParams = params;
        return ZSTD_resetCCtx_usingCDict(cctx, cdict, params.cParams.windowLog,
                                         params.fParams, pledgedSrcSize, zbuff);
    }

    CHECK_F( ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize,
                                     ZSTDcrp_continue, zbuff) );
    {   /* load raw dictionary content (if any) into the fresh context */
        size_t const dictID = ZSTD_compress_insertDictionary(
                cctx->blockState.prevCBlock, &cctx->blockState.matchState,
                &params, dict, dictSize, dictContentType, cctx->entropyWorkspace);
        if (ZSTD_isError(dictID)) return dictID;
        assert(dictID <= (size_t)(U32)-1);
        cctx->dictID = (U32)dictID;
    }
    return 0;
}

/* Same as ZSTD_compressBegin_internal(), but validates cParams first,
 * and always selects the non-buffered (direct) mode. */
size_t ZSTD_compressBegin_advanced_internal(ZSTD_CCtx* cctx,
                                    const void* dict, size_t dictSize,
                                    ZSTD_dictContentType_e dictContentType,
                                    const ZSTD_CDict* cdict,
                                    ZSTD_CCtx_params params,
                                    unsigned long long pledgedSrcSize)
{
    DEBUGLOG(4, "ZSTD_compressBegin_advanced_internal: wlog=%u", params.cParams.windowLog);
    /* compression parameters verification and optimization */
    CHECK_F( ZSTD_checkCParams(params.cParams) );
    return ZSTD_compressBegin_internal(cctx,
                                       dict, dictSize, dictContentType,
                                       cdict,
                                       params, pledgedSrcSize,
                                       ZSTDb_not_buffered);
}

/*! ZSTD_compressBegin_advanced() :
 *  Public entry point : caller supplies explicit ZSTD_parameters.
 * @return : 0, or an error code */
size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx,
                             const void* dict, size_t dictSize,
                             ZSTD_parameters params, unsigned long long pledgedSrcSize)
{
    ZSTD_CCtx_params const cctxParams =
            ZSTD_assignParamsToCCtxParams(cctx->requestedParams, params);
    return ZSTD_compressBegin_advanced_internal(cctx,
                                            dict, dictSize, ZSTD_dct_auto,
                                            NULL /*cdict*/,
                                            cctxParams, pledgedSrcSize);
}

/* Start a frame with a raw dictionary, deriving params from compressionLevel.
 * Source size is unknown at this point. */
size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel)
{
    ZSTD_parameters const params = ZSTD_getParams(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize);
    ZSTD_CCtx_params const cctxParams =
            ZSTD_assignParamsToCCtxParams(cctx->requestedParams, params);
    DEBUGLOG(4, "ZSTD_compressBegin_usingDict (dictSize=%u)", (U32)dictSize);
    return ZSTD_compressBegin_internal(cctx, dict, dictSize, ZSTD_dct_auto, NULL,
                                       cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, ZSTDb_not_buffered);
}

/* Simplest variant : no dictionary. */
size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel)
{
    return ZSTD_compressBegin_usingDict(cctx, NULL, 0, compressionLevel);
}


/*! ZSTD_writeEpilogue() :
 *  Ends a frame : writes the last block (empty, raw) and the optional checksum.
 * @return : nb of bytes written into dst (or an error code) */
static size_t ZSTD_writeEpilogue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity)
{
    BYTE* const ostart = (BYTE*)dst;
    BYTE* op = ostart;
    size_t fhSize = 0;

    DEBUGLOG(4, "ZSTD_writeEpilogue");
    if (cctx->stage == ZSTDcs_created) return ERROR(stage_wrong);  /* init missing */

    /* special case : empty frame (no block emitted yet => write header now) */
    if (cctx->stage == ZSTDcs_init) {
        fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, cctx->appliedParams, 0, 0);
        if (ZSTD_isError(fhSize)) return fhSize;
        dstCapacity -= fhSize;
        op += fhSize;
        cctx->stage = ZSTDcs_ongoing;
    }

    if (cctx->stage != ZSTDcs_ending) {
        /* write one last empty block, make it the "last" block */
        U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw)<<1) + 0;
        if (dstCapacity<4) return ERROR(dstSize_tooSmall);
        MEM_writeLE32(op, cBlockHeader24);
        op += ZSTD_blockHeaderSize;
        dstCapacity -= ZSTD_blockHeaderSize;
    }

    if (cctx->appliedParams.fParams.checksumFlag) {
        U32 const checksum = (U32) XXH64_digest(&cctx->xxhState);
        if (dstCapacity<4) return ERROR(dstSize_tooSmall);
        DEBUGLOG(4, "ZSTD_writeEpilogue: write checksum : %08X", checksum);
        MEM_writeLE32(op, checksum);
        op += 4;
    }

    cctx->stage = ZSTDcs_created;  /* return to "created but no init" status */
    return op-ostart;
}

/* Compress the last chunk of the frame, then append the epilogue.
 * Verifies that pledged source size (when declared) matches consumed size. */
size_t ZSTD_compressEnd (ZSTD_CCtx* cctx,
                         void* dst, size_t dstCapacity,
                   const void* src, size_t srcSize)
{
    size_t endResult;
    size_t const cSize = ZSTD_compressContinue_internal(cctx,
                                dst, dstCapacity, src, srcSize,
                                1 /* frame mode */, 1 /* last chunk */);
    if (ZSTD_isError(cSize)) return cSize;
    endResult = ZSTD_writeEpilogue(cctx, (char*)dst + cSize, dstCapacity-cSize);
    if (ZSTD_isError(endResult)) return endResult;
    if (cctx->appliedParams.fParams.contentSizeFlag) {  /* control src size */
        DEBUGLOG(4, "end of frame : controlling src size");
        /* pledgedSrcSizePlusOne stores pledged size + 1 (0 means "unknown") */
        if (cctx->pledgedSrcSizePlusOne != cctx->consumedSrcSize+1) {
            DEBUGLOG(4, "error : pledgedSrcSize = %u, while realSrcSize = %u",
                (U32)cctx->pledgedSrcSizePlusOne-1, (U32)cctx->consumedSrcSize);
            return ERROR(srcSize_wrong);
    }   }
    return cSize + endResult;
}


/* One-shot compression with explicit ZSTD_parameters (already converted
 * to cctxParams here). */
static size_t ZSTD_compress_internal (ZSTD_CCtx* cctx,
                               void* dst, size_t dstCapacity,
                         const void* src, size_t srcSize,
                         const void* dict,size_t dictSize,
                               ZSTD_parameters params)
{
    ZSTD_CCtx_params const cctxParams =
            ZSTD_assignParamsToCCtxParams(cctx->requestedParams, params);
    DEBUGLOG(4, "ZSTD_compress_internal");
    return ZSTD_compress_advanced_internal(cctx,
                                           dst, dstCapacity,
                                           src, srcSize,
                                           dict, dictSize,
                                           cctxParams);
}

/* Public one-shot entry point : validates cParams before delegating. */
size_t ZSTD_compress_advanced (ZSTD_CCtx* ctx,
                               void* dst, size_t dstCapacity,
                         const void* src, size_t srcSize,
                         const void* dict,size_t dictSize,
                               ZSTD_parameters params)
{
    DEBUGLOG(4, "ZSTD_compress_advanced");
    CHECK_F(ZSTD_checkCParams(params.cParams));
    return ZSTD_compress_internal(ctx, dst, dstCapacity, src, srcSize, dict, dictSize, params);
}

/* Internal */
size_t ZSTD_compress_advanced_internal(
        ZSTD_CCtx* cctx,
        void* dst, size_t dstCapacity,
        const void* src, size_t srcSize,
        const void* dict,size_t dictSize,
        ZSTD_CCtx_params params)
{
    DEBUGLOG(4, "ZSTD_compress_advanced_internal (srcSize:%u)",
                (U32)srcSize);
    CHECK_F( ZSTD_compressBegin_internal(cctx, dict, dictSize, ZSTD_dct_auto, NULL,
                                         params, srcSize, ZSTDb_not_buffered) );
    return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
}

/* One-shot compression with a raw dictionary.
 * Note : srcSize==0 is mapped to 1 and dict==NULL forces dictSize 0 for
 * parameter selection purposes only. */
size_t ZSTD_compress_usingDict(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize,
                               const void* dict, size_t dictSize, int compressionLevel)
{
    ZSTD_parameters const params = ZSTD_getParams(compressionLevel, srcSize ? srcSize : 1, dict ? dictSize : 0);
    ZSTD_CCtx_params cctxParams = ZSTD_assignParamsToCCtxParams(cctx->requestedParams, params);
    assert(params.fParams.contentSizeFlag == 1);
    /* negative levels disable literal compression */
    ZSTD_CCtxParam_setParameter(&cctxParams, ZSTD_p_compressLiterals, compressionLevel>=0);
    return ZSTD_compress_advanced_internal(cctx, dst, dstCapacity, src, srcSize, dict, dictSize, cctxParams);
}

size_t ZSTD_compressCCtx (ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, int compressionLevel)
{
    DEBUGLOG(4, "ZSTD_compressCCtx (srcSize=%u)", (U32)srcSize);
    return ZSTD_compress_usingDict(cctx, dst, dstCapacity, src, srcSize, NULL, 0, compressionLevel);
}

/* Convenience one-shot API : builds a transient CCtx on the stack. */
size_t ZSTD_compress(void* dst, size_t dstCapacity, const void* src, size_t srcSize, int compressionLevel)
{
    size_t result;
    ZSTD_CCtx ctxBody;
    memset(&ctxBody, 0, sizeof(ctxBody));
    ctxBody.customMem = ZSTD_defaultCMem;
    result = ZSTD_compressCCtx(&ctxBody, dst, dstCapacity, src, srcSize, compressionLevel);
    ZSTD_free(ctxBody.workSpace, ZSTD_defaultCMem);  /* can't free ctxBody itself, as it's on stack; free only heap content */
    return result;
}


/* =====  Dictionary API  ===== */

/*! ZSTD_estimateCDictSize_advanced() :
 *  Estimate amount of memory that will be needed to create a dictionary with following arguments */
size_t ZSTD_estimateCDictSize_advanced(
        size_t dictSize, ZSTD_compressionParameters cParams,
        ZSTD_dictLoadMethod_e dictLoadMethod)
{
    DEBUGLOG(5, "sizeof(ZSTD_CDict) : %u", (U32)sizeof(ZSTD_CDict));
    /* byRef dictionaries are not copied, so they add no storage cost */
    return sizeof(ZSTD_CDict) + HUF_WORKSPACE_SIZE + ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 0)
           + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize);
}

size_t ZSTD_estimateCDictSize(size_t dictSize, int compressionLevel)
{
    ZSTD_compressionParameters const cParams = ZSTD_getCParams(compressionLevel, 0, dictSize);
    return ZSTD_estimateCDictSize_advanced(dictSize, cParams, ZSTD_dlm_byCopy);
}

size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict)
{
    if (cdict==NULL) return 0;   /* support sizeof on NULL */
    DEBUGLOG(5, "sizeof(*cdict) : %u", (U32)sizeof(*cdict));
    /* dictBuffer!=NULL means the content was copied and is owned by cdict */
    return cdict->workspaceSize + (cdict->dictBuffer ? cdict->dictContentSize : 0) + sizeof(*cdict);
}

/* Fill an already-allocated CDict : copy or reference the dictionary content,
 * reset the match state, then digest the dictionary. */
static size_t ZSTD_initCDict_internal(
                    ZSTD_CDict* cdict,
              const void* dictBuffer, size_t dictSize,
                    ZSTD_dictLoadMethod_e dictLoadMethod,
                    ZSTD_dictContentType_e dictContentType,
                    ZSTD_compressionParameters cParams)
{
    DEBUGLOG(3, "ZSTD_initCDict_internal, dictContentType %u", (U32)dictContentType);
    assert(!ZSTD_checkCParams(cParams));
    cdict->cParams = cParams;
    if ((dictLoadMethod == ZSTD_dlm_byRef) || (!dictBuffer) || (!dictSize)) {
        cdict->dictBuffer = NULL;      /* nothing owned : reference caller's memory */
        cdict->dictContent = dictBuffer;
    } else {
        void* const internalBuffer = ZSTD_malloc(dictSize, cdict->customMem);
        cdict->dictBuffer = internalBuffer;
        cdict->dictContent = internalBuffer;
        if (!internalBuffer) return ERROR(memory_allocation);
        memcpy(internalBuffer, dictBuffer, dictSize);
    }
    cdict->dictContentSize = dictSize;

    /* Reset the state to no dictionary */
    ZSTD_reset_compressedBlockState(&cdict->cBlockState);
    {   void* const end = ZSTD_reset_matchState(
                &cdict->matchState,
                (U32*)cdict->workspace + HUF_WORKSPACE_SIZE_U32,
                &cParams, ZSTDcrp_continue, /* forCCtx */ 0);
        assert(end == (char*)cdict->workspace + cdict->workspaceSize);
        (void)end;
    }
    /* (Maybe) load the dictionary
     * Skips loading the dictionary if it is <= 8 bytes.
     */
    {   ZSTD_CCtx_params params;
        memset(&params, 0, sizeof(params));
        params.compressionLevel = ZSTD_CLEVEL_DEFAULT;
        params.fParams.contentSizeFlag = 1;
        params.cParams = cParams;
        {   size_t const dictID = ZSTD_compress_insertDictionary(
                    &cdict->cBlockState, &cdict->matchState, &params,
                    cdict->dictContent, cdict->dictContentSize,
                    dictContentType, cdict->workspace);
            if (ZSTD_isError(dictID)) return dictID;
            assert(dictID <= (size_t)(U32)-1);
            cdict->dictID = (U32)dictID;
        }
    }

    return 0;
}

/* Allocate and initialize a CDict using the provided custom allocator. */
ZSTD_CDict* ZSTD_createCDict_advanced(const void* dictBuffer, size_t dictSize,
                                      ZSTD_dictLoadMethod_e dictLoadMethod,
                                      ZSTD_dictContentType_e dictContentType,
                                      ZSTD_compressionParameters cParams, ZSTD_customMem customMem)
{
    DEBUGLOG(3, "ZSTD_createCDict_advanced, mode %u", (U32)dictContentType);
    /* customAlloc and customFree must be both set, or both unset */
    if (!customMem.customAlloc ^ !customMem.customFree) return NULL;

    {   ZSTD_CDict* const cdict = (ZSTD_CDict*)ZSTD_malloc(sizeof(ZSTD_CDict), customMem);
        size_t const workspaceSize = HUF_WORKSPACE_SIZE + ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 0);
        void* const workspace = ZSTD_malloc(workspaceSize, customMem);

        if (!cdict || !workspace) {
            ZSTD_free(cdict, customMem);
            ZSTD_free(workspace, customMem);
            return NULL;
        }
        cdict->customMem = customMem;
        cdict->workspace = workspace;
        cdict->workspaceSize = workspaceSize;
        if (ZSTD_isError( ZSTD_initCDict_internal(cdict,
                                        dictBuffer, dictSize,
                                        dictLoadMethod, dictContentType,
                                        cParams) )) {
            ZSTD_freeCDict(cdict);
            return NULL;
        }

        return cdict;
    }
}

ZSTD_CDict* ZSTD_createCDict(const void* dict, size_t dictSize, int compressionLevel)
{
    ZSTD_compressionParameters cParams = ZSTD_getCParams(compressionLevel, 0, dictSize);
    return ZSTD_createCDict_advanced(dict, dictSize,
                                     ZSTD_dlm_byCopy, ZSTD_dct_auto,
                                     cParams, ZSTD_defaultCMem);
}

/* Same as ZSTD_createCDict() but references caller's memory instead of
 * copying it : the dictionary buffer must outlive the CDict. */
ZSTD_CDict* ZSTD_createCDict_byReference(const void* dict, size_t dictSize, int compressionLevel)
{
    ZSTD_compressionParameters cParams = ZSTD_getCParams(compressionLevel, 0, dictSize);
    return ZSTD_createCDict_advanced(dict, dictSize,
                                     ZSTD_dlm_byRef, ZSTD_dct_auto,
                                     cParams, ZSTD_defaultCMem);
}

size_t ZSTD_freeCDict(ZSTD_CDict* cdict)
{
    if (cdict==NULL) return 0;   /* support free on NULL */
    {   ZSTD_customMem const cMem = cdict->customMem;
        ZSTD_free(cdict->workspace, cMem);
        ZSTD_free(cdict->dictBuffer, cMem);   /* NULL when byRef : ZSTD_free(NULL) is a no-op */
        ZSTD_free(cdict, cMem);
        return 0;
    }
}

/*! ZSTD_initStaticCDict_advanced() :
 *  Generate a digested dictionary in provided memory area.
 *  workspace: The memory area to emplace the dictionary into.
 *             Provided pointer must be 8-bytes aligned.
 *             It must outlive dictionary usage.
 *  workspaceSize: Use ZSTD_estimateCDictSize()
 *                 to determine how large workspace must be.
 *  cParams : use ZSTD_getCParams() to transform a compression level
 *            into its relevant cParams.
 * @return : pointer to ZSTD_CDict*, or NULL if error (size too small)
 *  Note : there is no corresponding "free" function.
 *         Since workspace was allocated externally, it must be freed externally.
 */
const ZSTD_CDict* ZSTD_initStaticCDict(
                                 void* workspace, size_t workspaceSize,
                           const void* dict, size_t dictSize,
                                 ZSTD_dictLoadMethod_e dictLoadMethod,
                                 ZSTD_dictContentType_e dictContentType,
                                 ZSTD_compressionParameters cParams)
{
    size_t const matchStateSize = ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 0);
    size_t const neededSize = sizeof(ZSTD_CDict) + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize)
                            + HUF_WORKSPACE_SIZE + matchStateSize;
    ZSTD_CDict* const cdict = (ZSTD_CDict*) workspace;
    void* ptr;
    if ((size_t)workspace & 7) return NULL;  /* 8-aligned */
    DEBUGLOG(4, "(workspaceSize < neededSize) : (%u < %u) => %u",
        (U32)workspaceSize, (U32)neededSize, (U32)(workspaceSize < neededSize));
    if (workspaceSize < neededSize) return NULL;

    if (dictLoadMethod == ZSTD_dlm_byCopy) {
        /* dictionary content is emplaced right after the CDict struct */
        memcpy(cdict+1, dict, dictSize);
        dict = cdict+1;
        ptr = (char*)workspace + sizeof(ZSTD_CDict) + dictSize;
    } else {
        ptr = cdict+1;
    }
    cdict->workspace = ptr;
    cdict->workspaceSize = HUF_WORKSPACE_SIZE + matchStateSize;

    /* content is now in-place (or external), so always reference it */
    if (ZSTD_isError( ZSTD_initCDict_internal(cdict,
                                              dict, dictSize,
                                              ZSTD_dlm_byRef, dictContentType,
                                              cParams) ))
        return NULL;

    return cdict;
}

ZSTD_compressionParameters ZSTD_getCParamsFromCDict(const ZSTD_CDict* cdict)
{
    assert(cdict != NULL);
    return cdict->cParams;
}

/* ZSTD_compressBegin_usingCDict_advanced() :
 * cdict must be != NULL */
size_t ZSTD_compressBegin_usingCDict_advanced(
    ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict,
    ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize)
{
    DEBUGLOG(4, "ZSTD_compressBegin_usingCDict_advanced");
    if (cdict==NULL) return ERROR(dictionary_wrong);
    {   ZSTD_CCtx_params params = cctx->requestedParams;
        params.cParams = ZSTD_getCParamsFromCDict(cdict);
        /* Increase window log to fit the entire dictionary and source if the
         * source size is known. Limit the increase to 19, which is the
         * window log for compression level 1 with the largest source size.
         */
        if (pledgedSrcSize != ZSTD_CONTENTSIZE_UNKNOWN) {
            U32 const limitedSrcSize = (U32)MIN(pledgedSrcSize, 1U << 19);
            U32 const limitedSrcLog = limitedSrcSize > 1 ? ZSTD_highbit32(limitedSrcSize - 1) + 1 : 1;
            params.cParams.windowLog = MAX(params.cParams.windowLog, limitedSrcLog);
        }
        params.fParams = fParams;
        return ZSTD_compressBegin_internal(cctx,
                                           NULL, 0, ZSTD_dct_auto,
                                           cdict,
                                           params, pledgedSrcSize,
                                           ZSTDb_not_buffered);
    }
}

/* ZSTD_compressBegin_usingCDict() :
 * pledgedSrcSize=0 means "unknown"
 * if pledgedSrcSize>0, it will enable contentSizeFlag */
size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict)
{
    ZSTD_frameParameters const fParams = { 0 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
    DEBUGLOG(4, "ZSTD_compressBegin_usingCDict : dictIDFlag == %u", !fParams.noDictIDFlag);
    return ZSTD_compressBegin_usingCDict_advanced(cctx, cdict, fParams, 0);
}

size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx,
                                void* dst, size_t dstCapacity,
                                const void* src, size_t srcSize,
                                const ZSTD_CDict* cdict, ZSTD_frameParameters fParams)
{
    CHECK_F (ZSTD_compressBegin_usingCDict_advanced(cctx, cdict, fParams, srcSize));   /* will check if cdict != NULL */
    return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
}

/*! ZSTD_compress_usingCDict() :
 *  Compression using a digested Dictionary.
 *  Faster startup than ZSTD_compress_usingDict(), recommended when same dictionary is used multiple times.
 *  Note that compression parameters are decided at CDict creation time
 *  while frame parameters are hardcoded */
size_t ZSTD_compress_usingCDict(ZSTD_CCtx* cctx,
                                void* dst, size_t dstCapacity,
                                const void* src, size_t srcSize,
                                const ZSTD_CDict* cdict)
{
    ZSTD_frameParameters const fParams = { 1 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
    return ZSTD_compress_usingCDict_advanced(cctx, dst, dstCapacity, src, srcSize, cdict, fParams);
}



/* ******************************************************************
*  Streaming
********************************************************************/

ZSTD_CStream* ZSTD_createCStream(void)
{
    DEBUGLOG(3, "ZSTD_createCStream");
    return ZSTD_createCStream_advanced(ZSTD_defaultCMem);
}

ZSTD_CStream* ZSTD_initStaticCStream(void *workspace, size_t workspaceSize)
{
    return ZSTD_initStaticCCtx(workspace, workspaceSize);
}

ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem)
{   /* CStream and CCtx are now same object */
    return ZSTD_createCCtx_advanced(customMem);
}

size_t ZSTD_freeCStream(ZSTD_CStream* zcs)
{
    return ZSTD_freeCCtx(zcs);   /* same object */
}



/*======   Initialization   ======*/

size_t ZSTD_CStreamInSize(void)  { return ZSTD_BLOCKSIZE_MAX; }

size_t ZSTD_CStreamOutSize(void)
{
    return ZSTD_compressBound(ZSTD_BLOCKSIZE_MAX) + ZSTD_blockHeaderSize + 4 /* 32-bits hash */ ;
}

/* Restart the stream : (re)initialize the frame, then reset the
 * streaming buffer positions and stage machine. */
static size_t ZSTD_resetCStream_internal(ZSTD_CStream* cctx,
                    const void* const dict, size_t const dictSize, ZSTD_dictContentType_e const dictContentType,
                    const ZSTD_CDict* const cdict,
                    ZSTD_CCtx_params const params, unsigned long long const pledgedSrcSize)
{
    DEBUGLOG(4, "ZSTD_resetCStream_internal (disableLiteralCompression=%i)",
                params.disableLiteralCompression);
    /* params are supposed to be fully validated at this point */
    assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
    assert(!((dict) && (cdict)));  /* either dict or cdict, not both */

    CHECK_F( ZSTD_compressBegin_internal(cctx,
                                         dict, dictSize, dictContentType,
                                         cdict,
                                         params, pledgedSrcSize,
                                         ZSTDb_buffered) );

    cctx->inToCompress = 0;
    cctx->inBuffPos = 0;
    cctx->inBuffTarget = cctx->blockSize
                       + (cctx->blockSize == pledgedSrcSize);   /* for small input: avoid automatic flush on reaching end of block, since it would require to add a 3-bytes null block to end frame */
    cctx->outBuffContentSize = cctx->outBuffFlushedSize = 0;
    cctx->streamStage = zcss_load;
    cctx->frameEnded = 0;
    return 0;   /* ready to go */
}

/* ZSTD_resetCStream():
 * pledgedSrcSize == 0 means "unknown" */
size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize)
{
    ZSTD_CCtx_params params = zcs->requestedParams;
    DEBUGLOG(4, "ZSTD_resetCStream: pledgedSrcSize = %u", (U32)pledgedSrcSize);
    if (pledgedSrcSize==0) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN;
    params.fParams.contentSizeFlag = 1;
    params.cParams = ZSTD_getCParamsFromCCtxParams(&params, pledgedSrcSize, 0);
    return ZSTD_resetCStream_internal(zcs, NULL, 0, ZSTD_dct_auto, zcs->cdict, params, pledgedSrcSize);
}

/*! ZSTD_initCStream_internal() :
 *  Note : for lib/compress only. Used by zstdmt_compress.c.
 *  Assumption 1 : params are valid
 *  Assumption 2 : either dict, or cdict, is defined, not both */
size_t ZSTD_initCStream_internal(ZSTD_CStream* zcs,
                    const void* dict, size_t dictSize, const ZSTD_CDict* cdict,
                    ZSTD_CCtx_params params, unsigned long long pledgedSrcSize)
{
    DEBUGLOG(4, "ZSTD_initCStream_internal");
    assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
    assert(!((dict) && (cdict)));  /* either dict or cdict, not both */

    if (dict && dictSize >= 8) {
        DEBUGLOG(4, "loading dictionary of size %u", (U32)dictSize);
        if (zcs->staticSize) {   /* static CCtx : never uses malloc */
            /* incompatible with internal cdict creation */
            return ERROR(memory_allocation);
        }
        ZSTD_freeCDict(zcs->cdictLocal);
        zcs->cdictLocal = ZSTD_createCDict_advanced(dict, dictSize,
                                            ZSTD_dlm_byCopy, ZSTD_dct_auto,
                                            params.cParams, zcs->customMem);
        zcs->cdict = zcs->cdictLocal;
        if (zcs->cdictLocal == NULL) return ERROR(memory_allocation);
    } else {
        if (cdict) {
            params.cParams = ZSTD_getCParamsFromCDict(cdict);  /* cParams are enforced from cdict; it includes windowLog */
        }
        ZSTD_freeCDict(zcs->cdictLocal);
        zcs->cdictLocal = NULL;
        zcs->cdict = cdict;
    }

    return ZSTD_resetCStream_internal(zcs, NULL, 0, ZSTD_dct_auto, zcs->cdict, params, pledgedSrcSize);
}

/* ZSTD_initCStream_usingCDict_advanced() :
 * same as ZSTD_initCStream_usingCDict(), with control over frame parameters */
size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs,
                                            const ZSTD_CDict* cdict,
                                            ZSTD_frameParameters fParams,
                                            unsigned long long pledgedSrcSize)
{
    DEBUGLOG(4, "ZSTD_initCStream_usingCDict_advanced");
    if (!cdict) return ERROR(dictionary_wrong); /* cannot handle NULL cdict (does not know what to do) */
    {   ZSTD_CCtx_params params = zcs->requestedParams;
        params.cParams = ZSTD_getCParamsFromCDict(cdict);
        params.fParams = fParams;
        return ZSTD_initCStream_internal(zcs,
                                NULL, 0, cdict,
                                params, pledgedSrcSize);
    }
}

/* note : cdict must outlive compression session */
size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict)
{
    ZSTD_frameParameters const fParams = { 0 /* contentSizeFlag */, 0 /* checksum */, 0 /* hideDictID */ };
    DEBUGLOG(4, "ZSTD_initCStream_usingCDict");
    return ZSTD_initCStream_usingCDict_advanced(zcs, cdict, fParams, ZSTD_CONTENTSIZE_UNKNOWN);  /* note : will check that cdict != NULL */
}


/* ZSTD_initCStream_advanced() :
 * pledgedSrcSize must be exact.
 * if srcSize is not known at init time, use value ZSTD_CONTENTSIZE_UNKNOWN.
 * dict is loaded with default parameters ZSTD_dct_auto and ZSTD_dlm_byCopy. */
size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs,
                                 const void* dict, size_t dictSize,
                                 ZSTD_parameters params, unsigned long long pledgedSrcSize)
{
    DEBUGLOG(4, "ZSTD_initCStream_advanced: pledgedSrcSize=%u, flag=%u",
                (U32)pledgedSrcSize, params.fParams.contentSizeFlag);
    CHECK_F( ZSTD_checkCParams(params.cParams) );
    if ((pledgedSrcSize==0) && (params.fParams.contentSizeFlag==0)) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN;  /* for compatibility with older programs relying on this behavior. Users should now specify ZSTD_CONTENTSIZE_UNKNOWN. This line will be removed in the future. */
    {   ZSTD_CCtx_params const cctxParams = ZSTD_assignParamsToCCtxParams(zcs->requestedParams, params);
        return ZSTD_initCStream_internal(zcs, dict, dictSize, NULL /*cdict*/, cctxParams, pledgedSrcSize);
    }
}

size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel)
{
    ZSTD_parameters const params = ZSTD_getParams(compressionLevel, 0, dictSize);
    ZSTD_CCtx_params const cctxParams =
            ZSTD_assignParamsToCCtxParams(zcs->requestedParams, params);
    return ZSTD_initCStream_internal(zcs, dict, dictSize, NULL, cctxParams, ZSTD_CONTENTSIZE_UNKNOWN);
}

size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pss)
{
    U64 const pledgedSrcSize = (pss==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss;  /* temporary : 0 interpreted as "unknown" during transition period. Users willing to specify "unknown" **must** use ZSTD_CONTENTSIZE_UNKNOWN. `0` will be interpreted as "empty" in the future */
    ZSTD_parameters const params = ZSTD_getParams(compressionLevel, pledgedSrcSize, 0);
    ZSTD_CCtx_params const cctxParams = ZSTD_assignParamsToCCtxParams(zcs->requestedParams, params);
    return ZSTD_initCStream_internal(zcs, NULL, 0, NULL, cctxParams, pledgedSrcSize);
}

size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel)
{
    DEBUGLOG(4, "ZSTD_initCStream");
    return ZSTD_initCStream_srcSize(zcs, compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN);
}

/*======   Compression   ======*/

/* Copy up to MIN(dstCapacity, srcSize) bytes; returns the amount copied. */
MEM_STATIC size_t ZSTD_limitCopy(void* dst, size_t dstCapacity,
                           const void* src, size_t srcSize)
{
    size_t const length = MIN(dstCapacity, srcSize);
    if (length) memcpy(dst, src, length);
    return length;
}

/** ZSTD_compressStream_generic():
 *  internal function for all *compressStream*() variants and *compress_generic()
 *  non-static, because can be called from zstdmt_compress.c
 * @return : hint size for next input */
size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
                                   ZSTD_outBuffer* output,
                                   ZSTD_inBuffer* input,
                                   ZSTD_EndDirective const flushMode)
{
    const char* const istart = (const char*)input->src;
    const char* const iend = istart + input->size;
    const char* ip = istart + input->pos;
    char* const ostart = (char*)output->dst;
    char* const oend = ostart + output->size;
    char* op = ostart + output->pos;
    U32 someMoreWork = 1;

    /* check expectations */
    DEBUGLOG(5, "ZSTD_compressStream_generic, flush=%u", (U32)flushMode);
    assert(zcs->inBuff != NULL);
    assert(zcs->inBuffSize > 0);
    assert(zcs->outBuff != NULL);
    assert(zcs->outBuffSize > 0);
    assert(output->pos <= output->size);
    assert(input->pos <= input->size);

    while (someMoreWork) {
        switch(zcs->streamStage)
        {
        case zcss_init:
            /* call ZSTD_initCStream() first ! */
            return ERROR(init_missing);

        case zcss_load:
            if ( (flushMode == ZSTD_e_end)
              && ((size_t)(oend-op) >= ZSTD_compressBound(iend-ip))  /* enough dstCapacity */
              && (zcs->inBuffPos == 0) ) {
                /* shortcut to compression pass directly into output buffer */
                size_t const cSize = ZSTD_compressEnd(zcs,
                                                op, oend-op, ip, iend-ip);
                DEBUGLOG(4, "ZSTD_compressEnd : %u", (U32)cSize);
                if (ZSTD_isError(cSize)) return cSize;
                ip = iend;
                op += cSize;
                zcs->frameEnded = 1;
                ZSTD_startNewCompression(zcs);
                someMoreWork = 0; break;
            }
            /* complete loading into inBuffer */
            {   size_t const toLoad = zcs->inBuffTarget - zcs->inBuffPos;
                size_t const loaded = ZSTD_limitCopy(
                                        zcs->inBuff + zcs->inBuffPos, toLoad,
                                        ip, iend-ip);
                zcs->inBuffPos += loaded;
                ip += loaded;
                if ( (flushMode == ZSTD_e_continue)
                  && (zcs->inBuffPos < zcs->inBuffTarget) ) {
                    /* not enough input to fill full block : stop here */
                    someMoreWork = 0; break;
                }
                if ( (flushMode == ZSTD_e_flush)
                  && (zcs->inBuffPos == zcs->inToCompress) ) {
                    /* empty */
                    someMoreWork = 0; break;
                }
            }
            /* compress current block (note : this stage cannot be stopped in the middle) */
            DEBUGLOG(5, "stream compression stage (flushMode==%u)", flushMode);
            {   void* cDst;
                size_t cSize;
                size_t const iSize = zcs->inBuffPos - zcs->inToCompress;
                size_t oSize = oend-op;
                unsigned const lastBlock = (flushMode == ZSTD_e_end) && (ip==iend);
                if (oSize >= ZSTD_compressBound(iSize))
                    cDst = op;   /* compress into output buffer, to skip flush stage */
                else
                    cDst = zcs->outBuff, oSize = zcs->outBuffSize;
                cSize = lastBlock ?
                        ZSTD_compressEnd(zcs, cDst, oSize,
                                    zcs->inBuff + zcs->inToCompress, iSize) :
                        ZSTD_compressContinue(zcs, cDst, oSize,
                                    zcs->inBuff + zcs->inToCompress, iSize);
                if (ZSTD_isError(cSize)) return cSize;
                zcs->frameEnded = lastBlock;
                /* prepare next block */
                zcs->inBuffTarget = zcs->inBuffPos + zcs->blockSize;
                if (zcs->inBuffTarget > zcs->inBuffSize)
                    zcs->inBuffPos = 0, zcs->inBuffTarget = zcs->blockSize;
                DEBUGLOG(5, "inBuffTarget:%u / inBuffSize:%u",
                         (U32)zcs->inBuffTarget, (U32)zcs->inBuffSize);
                if (!lastBlock)
                    assert(zcs->inBuffTarget <= zcs->inBuffSize);
                zcs->inToCompress = zcs->inBuffPos;
                if (cDst == op) {  /* no need to flush */
                    op += cSize;
                    if (zcs->frameEnded) {
                        DEBUGLOG(5, "Frame completed directly in outBuffer");
                        someMoreWork = 0;
                        ZSTD_startNewCompression(zcs);
                    }
                    break;
                }
                zcs->outBuffContentSize = cSize;
                zcs->outBuffFlushedSize = 0;
                zcs->streamStage = zcss_flush; /* pass-through to flush stage */
            }
            /* fall-through */
        case zcss_flush:
            DEBUGLOG(5, "flush stage");
            {   size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize;
                size_t const flushed = ZSTD_limitCopy(op, oend-op,
                            zcs->outBuff + zcs->outBuffFlushedSize, toFlush);
                DEBUGLOG(5, "toFlush: %u into %u ==> flushed: %u",
                            (U32)toFlush, (U32)(oend-op), (U32)flushed);
                op += flushed;
                zcs->outBuffFlushedSize += flushed;
                if (toFlush!=flushed) {
                    /* flush not fully completed, presumably because dst is too small */
                    assert(op==oend);
                    someMoreWork = 0;
                    break;
                }
                zcs->outBuffContentSize = zcs->outBuffFlushedSize = 0;
                if (zcs->frameEnded) {
                    DEBUGLOG(5, "Frame completed on flush");
                    someMoreWork = 0;
                    ZSTD_startNewCompression(zcs);
                    break;
                }
                zcs->streamStage = zcss_load;
                break;
            }

        default: /* impossible */
            assert(0);
        }
    }

    input->pos = ip - istart;
    output->pos = op - ostart;
    if (zcs->frameEnded) return 0;
    {   size_t hintInSize = zcs->inBuffTarget - zcs->inBuffPos;
        if (hintInSize==0) hintInSize = zcs->blockSize;
        return hintInSize;
    }
}

size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input)
{
    /* check conditions */
    if (output->pos > output->size) return ERROR(GENERIC);
    if (input->pos  > input->size)  return ERROR(GENERIC);

    return ZSTD_compressStream_generic(zcs, output, input, ZSTD_e_continue);
}

/* Advanced streaming entry point : transparently (re)initializes the stream
 * on first call, dispatching to multi-threaded compression when enabled. */
size_t ZSTD_compress_generic (ZSTD_CCtx* cctx,
                              ZSTD_outBuffer* output,
                              ZSTD_inBuffer* input,
                              ZSTD_EndDirective endOp)
{
    DEBUGLOG(5, "ZSTD_compress_generic, endOp=%u ", (U32)endOp);
    /* check conditions */
    if (output->pos > output->size) return ERROR(GENERIC);
    if (input->pos  > input->size)  return ERROR(GENERIC);
    assert(cctx!=NULL);

    /* transparent initialization stage */
    if (cctx->streamStage == zcss_init) {
        ZSTD_CCtx_params params = cctx->requestedParams;
        ZSTD_prefixDict const prefixDict = cctx->prefixDict;
        memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict));  /* single usage */
        assert(prefixDict.dict==NULL || cctx->cdict==NULL);   /* only one can be set */
        DEBUGLOG(4, "ZSTD_compress_generic : transparent init stage");
        if (endOp == ZSTD_e_end) cctx->pledgedSrcSizePlusOne = input->size + 1;  /* auto-fix pledgedSrcSize */
        params.cParams = ZSTD_getCParamsFromCCtxParams(
                &cctx->requestedParams, cctx->pledgedSrcSizePlusOne-1, 0 /*dictSize*/);

#ifdef ZSTD_MULTITHREAD
        if ((cctx->pledgedSrcSizePlusOne-1) <= ZSTDMT_JOBSIZE_MIN) {
            params.nbWorkers = 0; /* do not invoke multi-threading when src size is too small */
        }
        if (params.nbWorkers > 0) {
            /* mt context creation */
            if (cctx->mtctx == NULL || (params.nbWorkers != ZSTDMT_getNbWorkers(cctx->mtctx))) {
                DEBUGLOG(4, "ZSTD_compress_generic: creating new mtctx for nbWorkers=%u",
                            params.nbWorkers);
                if (cctx->mtctx != NULL)
                    DEBUGLOG(4, "ZSTD_compress_generic: previous nbWorkers was %u",
                                ZSTDMT_getNbWorkers(cctx->mtctx));
                ZSTDMT_freeCCtx(cctx->mtctx);
                cctx->mtctx = ZSTDMT_createCCtx_advanced(params.nbWorkers, cctx->customMem);
                if (cctx->mtctx == NULL) return ERROR(memory_allocation);
            }
            /* mt compression */
            DEBUGLOG(4, "call ZSTDMT_initCStream_internal as nbWorkers=%u", params.nbWorkers);
            CHECK_F( ZSTDMT_initCStream_internal(
                        cctx->mtctx,
                        prefixDict.dict, prefixDict.dictSize, ZSTD_dct_rawContent,
                        cctx->cdict, params, cctx->pledgedSrcSizePlusOne-1) );
            cctx->streamStage = zcss_load;
            cctx->appliedParams.nbWorkers = params.nbWorkers;
        } else
#endif
        {   CHECK_F( ZSTD_resetCStream_internal(cctx,
                            prefixDict.dict, prefixDict.dictSize, prefixDict.dictContentType,
                            cctx->cdict,
                            params, cctx->pledgedSrcSizePlusOne-1) );
            assert(cctx->streamStage == zcss_load);
            assert(cctx->appliedParams.nbWorkers == 0);
    }   }

    /* compression stage */
#ifdef ZSTD_MULTITHREAD
    if (cctx->appliedParams.nbWorkers > 0) {
        if (cctx->cParamsChanged) {
            ZSTDMT_updateCParams_whileCompressing(cctx->mtctx, &cctx->requestedParams);
            cctx->cParamsChanged = 0;
        }
        {   size_t const flushMin = ZSTDMT_compressStream_generic(cctx->mtctx, output, input, endOp);
            if ( ZSTD_isError(flushMin)
              || (endOp == ZSTD_e_end && flushMin == 0) ) { /* compression completed */
                ZSTD_startNewCompression(cctx);
            }
            return flushMin;
    }   }
#endif
    CHECK_F( ZSTD_compressStream_generic(cctx, output, input, endOp) );
    DEBUGLOG(5, "completed ZSTD_compress_generic");
    return cctx->outBuffContentSize - cctx->outBuffFlushedSize; /* remaining to flush */
}

/* Buffer-less wrapper around ZSTD_compress_generic(), updating the
 * caller's position cursors in place. */
size_t ZSTD_compress_generic_simpleArgs (
                            ZSTD_CCtx* cctx,
                            void* dst, size_t dstCapacity, size_t* dstPos,
                      const void* src, size_t srcSize, size_t* srcPos,
                            ZSTD_EndDirective endOp)
{
    ZSTD_outBuffer output = { dst, dstCapacity, *dstPos };
    ZSTD_inBuffer  input  = { src, srcSize, *srcPos };
    /* ZSTD_compress_generic() will check validity of dstPos and srcPos */
    size_t const cErr = ZSTD_compress_generic(cctx, &output, &input, endOp);
    *dstPos = output.pos;
    *srcPos = input.pos;
    return cErr;
}


/*======   Finalize   ======*/
+/*! ZSTD_flushStream() : + * @return : amount of data remaining to flush */ +size_t ZSTD_flushStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output) +{ + ZSTD_inBuffer input = { NULL, 0, 0 }; + if (output->pos > output->size) return ERROR(GENERIC); + CHECK_F( ZSTD_compressStream_generic(zcs, output, &input, ZSTD_e_flush) ); + return zcs->outBuffContentSize - zcs->outBuffFlushedSize; /* remaining to flush */ +} + + +size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output) +{ + ZSTD_inBuffer input = { NULL, 0, 0 }; + if (output->pos > output->size) return ERROR(GENERIC); + CHECK_F( ZSTD_compressStream_generic(zcs, output, &input, ZSTD_e_end) ); + { size_t const lastBlockSize = zcs->frameEnded ? 0 : ZSTD_BLOCKHEADERSIZE; + size_t const checksumSize = zcs->frameEnded ? 0 : zcs->appliedParams.fParams.checksumFlag * 4; + size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize + lastBlockSize + checksumSize; + DEBUGLOG(4, "ZSTD_endStream : remaining to flush : %u", (U32)toFlush); + return toFlush; + } +} + + +/*-===== Pre-defined compression levels =====-*/ + +#define ZSTD_MAX_CLEVEL 22 +int ZSTD_maxCLevel(void) { return ZSTD_MAX_CLEVEL; } + +static const ZSTD_compressionParameters ZSTD_defaultCParameters[4][ZSTD_MAX_CLEVEL+1] = { +{ /* "default" - guarantees a monotonically increasing memory budget */ + /* W, C, H, S, L, TL, strat */ + { 19, 12, 13, 1, 6, 1, ZSTD_fast }, /* base for negative levels */ + { 19, 13, 14, 1, 7, 1, ZSTD_fast }, /* level 1 */ + { 19, 15, 16, 1, 6, 1, ZSTD_fast }, /* level 2 */ + { 20, 16, 17, 1, 5, 8, ZSTD_dfast }, /* level 3 */ + { 20, 17, 18, 1, 5, 8, ZSTD_dfast }, /* level 4 */ + { 20, 17, 18, 2, 5, 16, ZSTD_greedy }, /* level 5 */ + { 21, 17, 19, 2, 5, 16, ZSTD_lazy }, /* level 6 */ + { 21, 18, 19, 3, 5, 16, ZSTD_lazy }, /* level 7 */ + { 21, 18, 20, 3, 5, 16, ZSTD_lazy2 }, /* level 8 */ + { 21, 19, 20, 3, 5, 16, ZSTD_lazy2 }, /* level 9 */ + { 21, 19, 21, 4, 5, 16, ZSTD_lazy2 }, /* level 10 */ + { 22, 20, 22, 4, 5, 16, 
ZSTD_lazy2 }, /* level 11 */ + { 22, 20, 22, 5, 5, 16, ZSTD_lazy2 }, /* level 12 */ + { 22, 21, 22, 4, 5, 32, ZSTD_btlazy2 }, /* level 13 */ + { 22, 21, 22, 5, 5, 32, ZSTD_btlazy2 }, /* level 14 */ + { 22, 22, 22, 6, 5, 32, ZSTD_btlazy2 }, /* level 15 */ + { 22, 21, 22, 4, 5, 48, ZSTD_btopt }, /* level 16 */ + { 23, 22, 22, 4, 4, 48, ZSTD_btopt }, /* level 17 */ + { 23, 22, 22, 5, 3, 64, ZSTD_btopt }, /* level 18 */ + { 23, 23, 22, 7, 3,128, ZSTD_btopt }, /* level 19 */ + { 25, 25, 23, 7, 3,128, ZSTD_btultra }, /* level 20 */ + { 26, 26, 24, 7, 3,256, ZSTD_btultra }, /* level 21 */ + { 27, 27, 25, 9, 3,512, ZSTD_btultra }, /* level 22 */ +}, +{ /* for srcSize <= 256 KB */ + /* W, C, H, S, L, T, strat */ + { 18, 12, 13, 1, 5, 1, ZSTD_fast }, /* base for negative levels */ + { 18, 13, 14, 1, 6, 1, ZSTD_fast }, /* level 1 */ + { 18, 14, 13, 1, 5, 8, ZSTD_dfast }, /* level 2 */ + { 18, 16, 15, 1, 5, 8, ZSTD_dfast }, /* level 3 */ + { 18, 15, 17, 1, 5, 8, ZSTD_greedy }, /* level 4.*/ + { 18, 16, 17, 4, 5, 8, ZSTD_greedy }, /* level 5.*/ + { 18, 16, 17, 3, 5, 8, ZSTD_lazy }, /* level 6.*/ + { 18, 17, 17, 4, 4, 8, ZSTD_lazy }, /* level 7 */ + { 18, 17, 17, 4, 4, 8, ZSTD_lazy2 }, /* level 8 */ + { 18, 17, 17, 5, 4, 8, ZSTD_lazy2 }, /* level 9 */ + { 18, 17, 17, 6, 4, 8, ZSTD_lazy2 }, /* level 10 */ + { 18, 18, 17, 6, 4, 8, ZSTD_lazy2 }, /* level 11.*/ + { 18, 18, 17, 5, 4, 8, ZSTD_btlazy2 }, /* level 12.*/ + { 18, 19, 17, 7, 4, 8, ZSTD_btlazy2 }, /* level 13 */ + { 18, 18, 18, 4, 4, 16, ZSTD_btopt }, /* level 14.*/ + { 18, 18, 18, 4, 3, 16, ZSTD_btopt }, /* level 15.*/ + { 18, 19, 18, 6, 3, 32, ZSTD_btopt }, /* level 16.*/ + { 18, 19, 18, 8, 3, 64, ZSTD_btopt }, /* level 17.*/ + { 18, 19, 18, 9, 3,128, ZSTD_btopt }, /* level 18.*/ + { 18, 19, 18, 10, 3,256, ZSTD_btopt }, /* level 19.*/ + { 18, 19, 18, 11, 3,512, ZSTD_btultra }, /* level 20.*/ + { 18, 19, 18, 12, 3,512, ZSTD_btultra }, /* level 21.*/ + { 18, 19, 18, 13, 3,512, ZSTD_btultra }, /* level 22.*/ +}, +{ /* for 
srcSize <= 128 KB */ + /* W, C, H, S, L, T, strat */ + { 17, 12, 12, 1, 5, 1, ZSTD_fast }, /* level 0 - not used */ + { 17, 12, 13, 1, 6, 1, ZSTD_fast }, /* level 1 */ + { 17, 13, 16, 1, 5, 1, ZSTD_fast }, /* level 2 */ + { 17, 16, 16, 2, 5, 8, ZSTD_dfast }, /* level 3 */ + { 17, 13, 15, 3, 4, 8, ZSTD_greedy }, /* level 4 */ + { 17, 15, 17, 4, 4, 8, ZSTD_greedy }, /* level 5 */ + { 17, 16, 17, 3, 4, 8, ZSTD_lazy }, /* level 6 */ + { 17, 15, 17, 4, 4, 8, ZSTD_lazy2 }, /* level 7 */ + { 17, 17, 17, 4, 4, 8, ZSTD_lazy2 }, /* level 8 */ + { 17, 17, 17, 5, 4, 8, ZSTD_lazy2 }, /* level 9 */ + { 17, 17, 17, 6, 4, 8, ZSTD_lazy2 }, /* level 10 */ + { 17, 17, 17, 7, 4, 8, ZSTD_lazy2 }, /* level 11 */ + { 17, 17, 17, 8, 4, 8, ZSTD_lazy2 }, /* level 12 */ + { 17, 18, 17, 6, 4, 8, ZSTD_btlazy2 }, /* level 13.*/ + { 17, 17, 17, 7, 3, 8, ZSTD_btopt }, /* level 14.*/ + { 17, 17, 17, 7, 3, 16, ZSTD_btopt }, /* level 15.*/ + { 17, 18, 17, 7, 3, 32, ZSTD_btopt }, /* level 16.*/ + { 17, 18, 17, 7, 3, 64, ZSTD_btopt }, /* level 17.*/ + { 17, 18, 17, 7, 3,256, ZSTD_btopt }, /* level 18.*/ + { 17, 18, 17, 8, 3,256, ZSTD_btopt }, /* level 19.*/ + { 17, 18, 17, 9, 3,256, ZSTD_btultra }, /* level 20.*/ + { 17, 18, 17, 10, 3,256, ZSTD_btultra }, /* level 21.*/ + { 17, 18, 17, 11, 3,512, ZSTD_btultra }, /* level 22.*/ +}, +{ /* for srcSize <= 16 KB */ + /* W, C, H, S, L, T, strat */ + { 14, 12, 13, 1, 5, 1, ZSTD_fast }, /* base for negative levels */ + { 14, 14, 14, 1, 6, 1, ZSTD_fast }, /* level 1 */ + { 14, 14, 14, 1, 4, 1, ZSTD_fast }, /* level 2 */ + { 14, 14, 14, 1, 4, 6, ZSTD_dfast }, /* level 3.*/ + { 14, 14, 14, 4, 4, 6, ZSTD_greedy }, /* level 4.*/ + { 14, 14, 14, 3, 4, 6, ZSTD_lazy }, /* level 5.*/ + { 14, 14, 14, 4, 4, 6, ZSTD_lazy2 }, /* level 6 */ + { 14, 14, 14, 5, 4, 6, ZSTD_lazy2 }, /* level 7 */ + { 14, 14, 14, 6, 4, 6, ZSTD_lazy2 }, /* level 8.*/ + { 14, 15, 14, 6, 4, 6, ZSTD_btlazy2 }, /* level 9.*/ + { 14, 15, 14, 3, 3, 6, ZSTD_btopt }, /* level 10.*/ + { 14, 15, 14, 6, 3, 
8, ZSTD_btopt }, /* level 11.*/ + { 14, 15, 14, 6, 3, 16, ZSTD_btopt }, /* level 12.*/ + { 14, 15, 14, 6, 3, 24, ZSTD_btopt }, /* level 13.*/ + { 14, 15, 15, 6, 3, 48, ZSTD_btopt }, /* level 14.*/ + { 14, 15, 15, 6, 3, 64, ZSTD_btopt }, /* level 15.*/ + { 14, 15, 15, 6, 3, 96, ZSTD_btopt }, /* level 16.*/ + { 14, 15, 15, 6, 3,128, ZSTD_btopt }, /* level 17.*/ + { 14, 15, 15, 6, 3,256, ZSTD_btopt }, /* level 18.*/ + { 14, 15, 15, 7, 3,256, ZSTD_btopt }, /* level 19.*/ + { 14, 15, 15, 8, 3,256, ZSTD_btultra }, /* level 20.*/ + { 14, 15, 15, 9, 3,256, ZSTD_btultra }, /* level 21.*/ + { 14, 15, 15, 10, 3,256, ZSTD_btultra }, /* level 22.*/ +}, +}; + +/*! ZSTD_getCParams() : +* @return ZSTD_compressionParameters structure for a selected compression level, srcSize and dictSize. +* Size values are optional, provide 0 if not known or unused */ +ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize) +{ + size_t const addedSize = srcSizeHint ? 0 : 500; + U64 const rSize = srcSizeHint+dictSize ? srcSizeHint+dictSize+addedSize : (U64)-1; + U32 const tableID = (rSize <= 256 KB) + (rSize <= 128 KB) + (rSize <= 16 KB); /* intentional underflow for srcSizeHint == 0 */ + int row = compressionLevel; + DEBUGLOG(5, "ZSTD_getCParams (cLevel=%i)", compressionLevel); + if (compressionLevel == 0) row = ZSTD_CLEVEL_DEFAULT; /* 0 == default */ + if (compressionLevel < 0) row = 0; /* entry 0 is baseline for fast mode */ + if (compressionLevel > ZSTD_MAX_CLEVEL) row = ZSTD_MAX_CLEVEL; + { ZSTD_compressionParameters cp = ZSTD_defaultCParameters[tableID][row]; + if (compressionLevel < 0) cp.targetLength = (unsigned)(-compressionLevel); /* acceleration factor */ + return ZSTD_adjustCParams_internal(cp, srcSizeHint, dictSize); } + +} + +/*! ZSTD_getParams() : +* same as ZSTD_getCParams(), but @return a `ZSTD_parameters` object (instead of `ZSTD_compressionParameters`). 
+* All fields of `ZSTD_frameParameters` are set to default (0) */ +ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize) { + ZSTD_parameters params; + ZSTD_compressionParameters const cParams = ZSTD_getCParams(compressionLevel, srcSizeHint, dictSize); + DEBUGLOG(5, "ZSTD_getParams (cLevel=%i)", compressionLevel); + memset(¶ms, 0, sizeof(params)); + params.cParams = cParams; + params.fParams.contentSizeFlag = 1; + return params; +} diff --git a/vendor/github.com/DataDog/zstd/zstd_compress_internal.h b/vendor/github.com/DataDog/zstd/zstd_compress_internal.h new file mode 100644 index 00000000000..81f12ca6df5 --- /dev/null +++ b/vendor/github.com/DataDog/zstd/zstd_compress_internal.h @@ -0,0 +1,709 @@ +/* + * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + +/* This header contains definitions + * that shall **only** be used by modules within lib/compress. + */ + +#ifndef ZSTD_COMPRESS_H +#define ZSTD_COMPRESS_H + +/*-************************************* +* Dependencies +***************************************/ +#include "zstd_internal.h" +#ifdef ZSTD_MULTITHREAD +# include "zstdmt_compress.h" +#endif + +#if defined (__cplusplus) +extern "C" { +#endif + +/*-************************************* +* Constants +***************************************/ +#define kSearchStrength 8 +#define HASH_READ_SIZE 8 +#define ZSTD_DUBT_UNSORTED_MARK 1 /* For btlazy2 strategy, index 1 now means "unsorted". + It could be confused for a real successor at index "1", if sorted as larger than its predecessor. + It's not a big deal though : candidate will just be sorted again. 
+ Additionnally, candidate position 1 will be lost. + But candidate 1 cannot hide a large tree of candidates, so it's a minimal loss. + The benefit is that ZSTD_DUBT_UNSORTED_MARK cannot be misdhandled after table re-use with a different strategy */ + + +/*-************************************* +* Context memory management +***************************************/ +typedef enum { ZSTDcs_created=0, ZSTDcs_init, ZSTDcs_ongoing, ZSTDcs_ending } ZSTD_compressionStage_e; +typedef enum { zcss_init=0, zcss_load, zcss_flush } ZSTD_cStreamStage; + +typedef struct ZSTD_prefixDict_s { + const void* dict; + size_t dictSize; + ZSTD_dictContentType_e dictContentType; +} ZSTD_prefixDict; + +typedef struct { + U32 hufCTable[HUF_CTABLE_SIZE_U32(255)]; + FSE_CTable offcodeCTable[FSE_CTABLE_SIZE_U32(OffFSELog, MaxOff)]; + FSE_CTable matchlengthCTable[FSE_CTABLE_SIZE_U32(MLFSELog, MaxML)]; + FSE_CTable litlengthCTable[FSE_CTABLE_SIZE_U32(LLFSELog, MaxLL)]; + HUF_repeat hufCTable_repeatMode; + FSE_repeat offcode_repeatMode; + FSE_repeat matchlength_repeatMode; + FSE_repeat litlength_repeatMode; +} ZSTD_entropyCTables_t; + +typedef struct { + U32 off; + U32 len; +} ZSTD_match_t; + +typedef struct { + int price; + U32 off; + U32 mlen; + U32 litlen; + U32 rep[ZSTD_REP_NUM]; +} ZSTD_optimal_t; + +typedef struct { + /* All tables are allocated inside cctx->workspace by ZSTD_resetCCtx_internal() */ + U32* litFreq; /* table of literals statistics, of size 256 */ + U32* litLengthFreq; /* table of litLength statistics, of size (MaxLL+1) */ + U32* matchLengthFreq; /* table of matchLength statistics, of size (MaxML+1) */ + U32* offCodeFreq; /* table of offCode statistics, of size (MaxOff+1) */ + ZSTD_match_t* matchTable; /* list of found matches, of size ZSTD_OPT_NUM+1 */ + ZSTD_optimal_t* priceTable; /* All positions tracked by optimal parser, of size ZSTD_OPT_NUM+1 */ + + U32 litSum; /* nb of literals */ + U32 litLengthSum; /* nb of litLength codes */ + U32 matchLengthSum; /* nb of matchLength 
codes */ + U32 offCodeSum; /* nb of offset codes */ + /* begin updated by ZSTD_setLog2Prices */ + U32 log2litSum; /* pow2 to compare log2(litfreq) to */ + U32 log2litLengthSum; /* pow2 to compare log2(llfreq) to */ + U32 log2matchLengthSum; /* pow2 to compare log2(mlfreq) to */ + U32 log2offCodeSum; /* pow2 to compare log2(offreq) to */ + /* end : updated by ZSTD_setLog2Prices */ + U32 staticPrices; /* prices follow a pre-defined cost structure, statistics are irrelevant */ +} optState_t; + +typedef struct { + ZSTD_entropyCTables_t entropy; + U32 rep[ZSTD_REP_NUM]; +} ZSTD_compressedBlockState_t; + +typedef struct { + BYTE const* nextSrc; /* next block here to continue on current prefix */ + BYTE const* base; /* All regular indexes relative to this position */ + BYTE const* dictBase; /* extDict indexes relative to this position */ + U32 dictLimit; /* below that point, need extDict */ + U32 lowLimit; /* below that point, no more data */ +} ZSTD_window_t; + +typedef struct { + ZSTD_window_t window; /* State for window round buffer management */ + U32 loadedDictEnd; /* index of end of dictionary */ + U32 nextToUpdate; /* index from which to continue table update */ + U32 nextToUpdate3; /* index from which to continue table update */ + U32 hashLog3; /* dispatch table : larger == faster, more memory */ + U32* hashTable; + U32* hashTable3; + U32* chainTable; + optState_t opt; /* optimal parser state */ +} ZSTD_matchState_t; + +typedef struct { + ZSTD_compressedBlockState_t* prevCBlock; + ZSTD_compressedBlockState_t* nextCBlock; + ZSTD_matchState_t matchState; +} ZSTD_blockState_t; + +typedef struct { + U32 offset; + U32 checksum; +} ldmEntry_t; + +typedef struct { + ZSTD_window_t window; /* State for the window round buffer management */ + ldmEntry_t* hashTable; + BYTE* bucketOffsets; /* Next position in bucket to insert entry */ + U64 hashPower; /* Used to compute the rolling hash. 
+ * Depends on ldmParams.minMatchLength */ +} ldmState_t; + +typedef struct { + U32 enableLdm; /* 1 if enable long distance matching */ + U32 hashLog; /* Log size of hashTable */ + U32 bucketSizeLog; /* Log bucket size for collision resolution, at most 8 */ + U32 minMatchLength; /* Minimum match length */ + U32 hashEveryLog; /* Log number of entries to skip */ + U32 windowLog; /* Window log for the LDM */ +} ldmParams_t; + +typedef struct { + U32 offset; + U32 litLength; + U32 matchLength; +} rawSeq; + +typedef struct { + rawSeq* seq; /* The start of the sequences */ + size_t pos; /* The position where reading stopped. <= size. */ + size_t size; /* The number of sequences. <= capacity. */ + size_t capacity; /* The capacity of the `seq` pointer */ +} rawSeqStore_t; + +struct ZSTD_CCtx_params_s { + ZSTD_format_e format; + ZSTD_compressionParameters cParams; + ZSTD_frameParameters fParams; + + int compressionLevel; + int disableLiteralCompression; + int forceWindow; /* force back-references to respect limit of + * 1< 63) ? ZSTD_highbit32(litLength) + LL_deltaCode : LL_Code[litLength]; +} + +/* ZSTD_MLcode() : + * note : mlBase = matchLength - MINMATCH; + * because it's the format it's stored in seqStore->sequences */ +MEM_STATIC U32 ZSTD_MLcode(U32 mlBase) +{ + static const BYTE ML_Code[128] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37, + 38, 38, 38, 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39, + 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, + 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, + 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, + 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42 }; + static const U32 ML_deltaCode = 36; + return (mlBase > 127) ? ZSTD_highbit32(mlBase) + ML_deltaCode : ML_Code[mlBase]; +} + +/*! 
ZSTD_storeSeq() : + * Store a sequence (literal length, literals, offset code and match length code) into seqStore_t. + * `offsetCode` : distance to match + 3 (values 1-3 are repCodes). + * `mlBase` : matchLength - MINMATCH +*/ +MEM_STATIC void ZSTD_storeSeq(seqStore_t* seqStorePtr, size_t litLength, const void* literals, U32 offsetCode, size_t mlBase) +{ +#if defined(ZSTD_DEBUG) && (ZSTD_DEBUG >= 6) + static const BYTE* g_start = NULL; + if (g_start==NULL) g_start = (const BYTE*)literals; /* note : index only works for compression within a single segment */ + { U32 const pos = (U32)((const BYTE*)literals - g_start); + DEBUGLOG(6, "Cpos%7u :%3u literals, match%3u bytes at dist.code%7u", + pos, (U32)litLength, (U32)mlBase+MINMATCH, (U32)offsetCode); + } +#endif + /* copy Literals */ + assert(seqStorePtr->lit + litLength <= seqStorePtr->litStart + 128 KB); + ZSTD_wildcopy(seqStorePtr->lit, literals, litLength); + seqStorePtr->lit += litLength; + + /* literal Length */ + if (litLength>0xFFFF) { + assert(seqStorePtr->longLengthID == 0); /* there can only be a single long length */ + seqStorePtr->longLengthID = 1; + seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart); + } + seqStorePtr->sequences[0].litLength = (U16)litLength; + + /* match offset */ + seqStorePtr->sequences[0].offset = offsetCode + 1; + + /* match Length */ + if (mlBase>0xFFFF) { + assert(seqStorePtr->longLengthID == 0); /* there can only be a single long length */ + seqStorePtr->longLengthID = 2; + seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart); + } + seqStorePtr->sequences[0].matchLength = (U16)mlBase; + + seqStorePtr->sequences++; +} + + +/*-************************************* +* Match length counter +***************************************/ +static unsigned ZSTD_NbCommonBytes (size_t val) +{ + if (MEM_isLittleEndian()) { + if (MEM_64bits()) { +# if defined(_MSC_VER) && defined(_WIN64) + unsigned long r = 0; + 
_BitScanForward64( &r, (U64)val ); + return (unsigned)(r>>3); +# elif defined(__GNUC__) && (__GNUC__ >= 4) + return (__builtin_ctzll((U64)val) >> 3); +# else + static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2, + 0, 3, 1, 3, 1, 4, 2, 7, + 0, 2, 3, 6, 1, 5, 3, 5, + 1, 3, 4, 4, 2, 5, 6, 7, + 7, 0, 1, 2, 3, 3, 4, 6, + 2, 6, 5, 5, 3, 4, 5, 6, + 7, 1, 2, 4, 6, 4, 4, 5, + 7, 2, 6, 5, 7, 6, 7, 7 }; + return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58]; +# endif + } else { /* 32 bits */ +# if defined(_MSC_VER) + unsigned long r=0; + _BitScanForward( &r, (U32)val ); + return (unsigned)(r>>3); +# elif defined(__GNUC__) && (__GNUC__ >= 3) + return (__builtin_ctz((U32)val) >> 3); +# else + static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, + 3, 2, 2, 1, 3, 2, 0, 1, + 3, 3, 1, 2, 2, 2, 2, 0, + 3, 1, 2, 0, 1, 0, 1, 1 }; + return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27]; +# endif + } + } else { /* Big Endian CPU */ + if (MEM_64bits()) { +# if defined(_MSC_VER) && defined(_WIN64) + unsigned long r = 0; + _BitScanReverse64( &r, val ); + return (unsigned)(r>>3); +# elif defined(__GNUC__) && (__GNUC__ >= 4) + return (__builtin_clzll(val) >> 3); +# else + unsigned r; + const unsigned n32 = sizeof(size_t)*4; /* calculate this way due to compiler complaining in 32-bits mode */ + if (!(val>>n32)) { r=4; } else { r=0; val>>=n32; } + if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; } + r += (!val); + return r; +# endif + } else { /* 32 bits */ +# if defined(_MSC_VER) + unsigned long r = 0; + _BitScanReverse( &r, (unsigned long)val ); + return (unsigned)(r>>3); +# elif defined(__GNUC__) && (__GNUC__ >= 3) + return (__builtin_clz((U32)val) >> 3); +# else + unsigned r; + if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; } + r += (!val); + return r; +# endif + } } +} + + +MEM_STATIC size_t ZSTD_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* const pInLimit) +{ + const BYTE* const pStart = pIn; 
+ const BYTE* const pInLoopLimit = pInLimit - (sizeof(size_t)-1); + + if (pIn < pInLoopLimit) { + { size_t const diff = MEM_readST(pMatch) ^ MEM_readST(pIn); + if (diff) return ZSTD_NbCommonBytes(diff); } + pIn+=sizeof(size_t); pMatch+=sizeof(size_t); + while (pIn < pInLoopLimit) { + size_t const diff = MEM_readST(pMatch) ^ MEM_readST(pIn); + if (!diff) { pIn+=sizeof(size_t); pMatch+=sizeof(size_t); continue; } + pIn += ZSTD_NbCommonBytes(diff); + return (size_t)(pIn - pStart); + } } + if (MEM_64bits() && (pIn<(pInLimit-3)) && (MEM_read32(pMatch) == MEM_read32(pIn))) { pIn+=4; pMatch+=4; } + if ((pIn<(pInLimit-1)) && (MEM_read16(pMatch) == MEM_read16(pIn))) { pIn+=2; pMatch+=2; } + if ((pIn> (32-h) ; } +MEM_STATIC size_t ZSTD_hash3Ptr(const void* ptr, U32 h) { return ZSTD_hash3(MEM_readLE32(ptr), h); } /* only in zstd_opt.h */ + +static const U32 prime4bytes = 2654435761U; +static U32 ZSTD_hash4(U32 u, U32 h) { return (u * prime4bytes) >> (32-h) ; } +static size_t ZSTD_hash4Ptr(const void* ptr, U32 h) { return ZSTD_hash4(MEM_read32(ptr), h); } + +static const U64 prime5bytes = 889523592379ULL; +static size_t ZSTD_hash5(U64 u, U32 h) { return (size_t)(((u << (64-40)) * prime5bytes) >> (64-h)) ; } +static size_t ZSTD_hash5Ptr(const void* p, U32 h) { return ZSTD_hash5(MEM_readLE64(p), h); } + +static const U64 prime6bytes = 227718039650203ULL; +static size_t ZSTD_hash6(U64 u, U32 h) { return (size_t)(((u << (64-48)) * prime6bytes) >> (64-h)) ; } +static size_t ZSTD_hash6Ptr(const void* p, U32 h) { return ZSTD_hash6(MEM_readLE64(p), h); } + +static const U64 prime7bytes = 58295818150454627ULL; +static size_t ZSTD_hash7(U64 u, U32 h) { return (size_t)(((u << (64-56)) * prime7bytes) >> (64-h)) ; } +static size_t ZSTD_hash7Ptr(const void* p, U32 h) { return ZSTD_hash7(MEM_readLE64(p), h); } + +static const U64 prime8bytes = 0xCF1BBCDCB7A56463ULL; +static size_t ZSTD_hash8(U64 u, U32 h) { return (size_t)(((u) * prime8bytes) >> (64-h)) ; } +static size_t ZSTD_hash8Ptr(const 
void* p, U32 h) { return ZSTD_hash8(MEM_readLE64(p), h); } + +MEM_STATIC size_t ZSTD_hashPtr(const void* p, U32 hBits, U32 mls) +{ + switch(mls) + { + default: + case 4: return ZSTD_hash4Ptr(p, hBits); + case 5: return ZSTD_hash5Ptr(p, hBits); + case 6: return ZSTD_hash6Ptr(p, hBits); + case 7: return ZSTD_hash7Ptr(p, hBits); + case 8: return ZSTD_hash8Ptr(p, hBits); + } +} + +/*-************************************* +* Round buffer management +***************************************/ +/* Max current allowed */ +#define ZSTD_CURRENT_MAX ((3U << 29) + (1U << ZSTD_WINDOWLOG_MAX)) +/* Maximum chunk size before overflow correction needs to be called again */ +#define ZSTD_CHUNKSIZE_MAX \ + ( ((U32)-1) /* Maximum ending current index */ \ + - ZSTD_CURRENT_MAX) /* Maximum beginning lowLimit */ + +/** + * ZSTD_window_clear(): + * Clears the window containing the history by simply setting it to empty. + */ +MEM_STATIC void ZSTD_window_clear(ZSTD_window_t* window) +{ + size_t const endT = (size_t)(window->nextSrc - window->base); + U32 const end = (U32)endT; + + window->lowLimit = end; + window->dictLimit = end; +} + +/** + * ZSTD_window_hasExtDict(): + * Returns non-zero if the window has a non-empty extDict. + */ +MEM_STATIC U32 ZSTD_window_hasExtDict(ZSTD_window_t const window) +{ + return window.lowLimit < window.dictLimit; +} + +/** + * ZSTD_window_needOverflowCorrection(): + * Returns non-zero if the indices are getting too large and need overflow + * protection. + */ +MEM_STATIC U32 ZSTD_window_needOverflowCorrection(ZSTD_window_t const window, + void const* srcEnd) +{ + U32 const current = (U32)((BYTE const*)srcEnd - window.base); + return current > ZSTD_CURRENT_MAX; +} + +/** + * ZSTD_window_correctOverflow(): + * Reduces the indices to protect from index overflow. + * Returns the correction made to the indices, which must be applied to every + * stored index. + * + * The least significant cycleLog bits of the indices must remain the same, + * which may be 0. 
Every index up to maxDist in the past must be valid. + * NOTE: (maxDist & cycleMask) must be zero. + */ +MEM_STATIC U32 ZSTD_window_correctOverflow(ZSTD_window_t* window, U32 cycleLog, + U32 maxDist, void const* src) +{ + /* preemptive overflow correction: + * 1. correction is large enough: + * lowLimit > (3<<29) ==> current > 3<<29 + 1< (3<<29 + 1< (3<<29) - (1< (3<<29) - (1<<30) (NOTE: chainLog <= 30) + * > 1<<29 + * + * 2. (ip+ZSTD_CHUNKSIZE_MAX - cctx->base) doesn't overflow: + * After correction, current is less than (1<base < 1<<32. + * 3. (cctx->lowLimit + 1< 3<<29 + 1<base); + U32 const newCurrent = (current & cycleMask) + maxDist; + U32 const correction = current - newCurrent; + assert((maxDist & cycleMask) == 0); + assert(current > newCurrent); + /* Loose bound, should be around 1<<29 (see above) */ + assert(correction > 1<<28); + + window->base += correction; + window->dictBase += correction; + window->lowLimit -= correction; + window->dictLimit -= correction; + + DEBUGLOG(4, "Correction of 0x%x bytes to lowLimit=0x%x", correction, + window->lowLimit); + return correction; +} + +/** + * ZSTD_window_enforceMaxDist(): + * Updates lowLimit so that: + * (srcEnd - base) - lowLimit == maxDist + loadedDictEnd + * This allows a simple check that index >= lowLimit to see if index is valid. + * This must be called before a block compression call, with srcEnd as the block + * source end. + * If loadedDictEndPtr is not NULL, we set it to zero once we update lowLimit. + * This is because dictionaries are allowed to be referenced as long as the last + * byte of the dictionary is in the window, but once they are out of range, + * they cannot be referenced. If loadedDictEndPtr is NULL, we use + * loadedDictEnd == 0. + */ +MEM_STATIC void ZSTD_window_enforceMaxDist(ZSTD_window_t* window, + void const* srcEnd, U32 maxDist, + U32* loadedDictEndPtr) +{ + U32 const current = (U32)((BYTE const*)srcEnd - window->base); + U32 loadedDictEnd = loadedDictEndPtr != NULL ? 
*loadedDictEndPtr : 0; + if (current > maxDist + loadedDictEnd) { + U32 const newLowLimit = current - maxDist; + if (window->lowLimit < newLowLimit) window->lowLimit = newLowLimit; + if (window->dictLimit < window->lowLimit) { + DEBUGLOG(5, "Update dictLimit from %u to %u", window->dictLimit, + window->lowLimit); + window->dictLimit = window->lowLimit; + } + if (loadedDictEndPtr) + *loadedDictEndPtr = 0; + } +} + +/** + * ZSTD_window_update(): + * Updates the window by appending [src, src + srcSize) to the window. + * If it is not contiguous, the current prefix becomes the extDict, and we + * forget about the extDict. Handles overlap of the prefix and extDict. + * Returns non-zero if the segment is contiguous. + */ +MEM_STATIC U32 ZSTD_window_update(ZSTD_window_t* window, + void const* src, size_t srcSize) +{ + BYTE const* const ip = (BYTE const*)src; + U32 contiguous = 1; + /* Check if blocks follow each other */ + if (src != window->nextSrc) { + /* not contiguous */ + size_t const distanceFromBase = (size_t)(window->nextSrc - window->base); + DEBUGLOG(5, "Non contiguous blocks, new segment starts at %u", + window->dictLimit); + window->lowLimit = window->dictLimit; + assert(distanceFromBase == (size_t)(U32)distanceFromBase); /* should never overflow */ + window->dictLimit = (U32)distanceFromBase; + window->dictBase = window->base; + window->base = ip - distanceFromBase; + // ms->nextToUpdate = window->dictLimit; + if (window->dictLimit - window->lowLimit < HASH_READ_SIZE) window->lowLimit = window->dictLimit; /* too small extDict */ + contiguous = 0; + } + window->nextSrc = ip + srcSize; + /* if input and dictionary overlap : reduce dictionary (area presumed modified by input) */ + if ( (ip+srcSize > window->dictBase + window->lowLimit) + & (ip < window->dictBase + window->dictLimit)) { + ptrdiff_t const highInputIdx = (ip + srcSize) - window->dictBase; + U32 const lowLimitMax = (highInputIdx > (ptrdiff_t)window->dictLimit) ? 
window->dictLimit : (U32)highInputIdx; + window->lowLimit = lowLimitMax; + } + return contiguous; +} + +#if defined (__cplusplus) +} +#endif + + +/* ============================================================== + * Private declarations + * These prototypes shall only be called from within lib/compress + * ============================================================== */ + +/* ZSTD_getCParamsFromCCtxParams() : + * cParams are built depending on compressionLevel, src size hints, + * LDM and manually set compression parameters. + */ +ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams( + const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize); + +/*! ZSTD_initCStream_internal() : + * Private use only. Init streaming operation. + * expects params to be valid. + * must receive dict, or cdict, or none, but not both. + * @return : 0, or an error code */ +size_t ZSTD_initCStream_internal(ZSTD_CStream* zcs, + const void* dict, size_t dictSize, + const ZSTD_CDict* cdict, + ZSTD_CCtx_params params, unsigned long long pledgedSrcSize); + +/*! ZSTD_compressStream_generic() : + * Private use only. To be called from zstdmt_compress.c in single-thread mode. */ +size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs, + ZSTD_outBuffer* output, + ZSTD_inBuffer* input, + ZSTD_EndDirective const flushMode); + +/*! ZSTD_getCParamsFromCDict() : + * as the name implies */ +ZSTD_compressionParameters ZSTD_getCParamsFromCDict(const ZSTD_CDict* cdict); + +/* ZSTD_compressBegin_advanced_internal() : + * Private use only. To be called from zstdmt_compress.c. */ +size_t ZSTD_compressBegin_advanced_internal(ZSTD_CCtx* cctx, + const void* dict, size_t dictSize, + ZSTD_dictContentType_e dictContentType, + const ZSTD_CDict* cdict, + ZSTD_CCtx_params params, + unsigned long long pledgedSrcSize); + +/* ZSTD_compress_advanced_internal() : + * Private use only. To be called from zstdmt_compress.c. 
*/ +size_t ZSTD_compress_advanced_internal(ZSTD_CCtx* cctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + const void* dict,size_t dictSize, + ZSTD_CCtx_params params); + + +/* ZSTD_writeLastEmptyBlock() : + * output an empty Block with end-of-frame mark to complete a frame + * @return : size of data written into `dst` (== ZSTD_blockHeaderSize (defined in zstd_internal.h)) + * or an error code if `dstCapcity` is too small ( /* memcpy, memmove, memset */ +#include "cpu.h" +#include "mem.h" /* low level memory routines */ +#define FSE_STATIC_LINKING_ONLY +#include "fse.h" +#define HUF_STATIC_LINKING_ONLY +#include "huf.h" +#include "zstd_internal.h" + +#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1) +# include "zstd_legacy.h" +#endif + + +/*-************************************* +* Errors +***************************************/ +#define ZSTD_isError ERR_isError /* for inlining */ +#define FSE_isError ERR_isError +#define HUF_isError ERR_isError + + +/*_******************************************************* +* Memory operations +**********************************************************/ +static void ZSTD_copy4(void* dst, const void* src) { memcpy(dst, src, 4); } + + +/*-************************************************************* +* Context management +***************************************************************/ +typedef enum { ZSTDds_getFrameHeaderSize, ZSTDds_decodeFrameHeader, + ZSTDds_decodeBlockHeader, ZSTDds_decompressBlock, + ZSTDds_decompressLastBlock, ZSTDds_checkChecksum, + ZSTDds_decodeSkippableHeader, ZSTDds_skipFrame } ZSTD_dStage; + +typedef enum { zdss_init=0, zdss_loadHeader, + zdss_read, zdss_load, zdss_flush } ZSTD_dStreamStage; + + +typedef struct { + U32 fastMode; + U32 tableLog; +} ZSTD_seqSymbol_header; + +typedef struct { + U16 nextState; + BYTE nbAdditionalBits; + BYTE nbBits; + U32 baseValue; +} ZSTD_seqSymbol; + +#define SEQSYMBOL_TABLE_SIZE(log) (1 + (1 << (log))) + +typedef struct { + 
ZSTD_seqSymbol LLTable[SEQSYMBOL_TABLE_SIZE(LLFSELog)]; + ZSTD_seqSymbol OFTable[SEQSYMBOL_TABLE_SIZE(OffFSELog)]; + ZSTD_seqSymbol MLTable[SEQSYMBOL_TABLE_SIZE(MLFSELog)]; + HUF_DTable hufTable[HUF_DTABLE_SIZE(HufLog)]; /* can accommodate HUF_decompress4X */ + U32 workspace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32]; + U32 rep[ZSTD_REP_NUM]; +} ZSTD_entropyDTables_t; + +struct ZSTD_DCtx_s +{ + const ZSTD_seqSymbol* LLTptr; + const ZSTD_seqSymbol* MLTptr; + const ZSTD_seqSymbol* OFTptr; + const HUF_DTable* HUFptr; + ZSTD_entropyDTables_t entropy; + const void* previousDstEnd; /* detect continuity */ + const void* base; /* start of current segment */ + const void* vBase; /* virtual start of previous segment if it was just before current one */ + const void* dictEnd; /* end of previous segment */ + size_t expected; + ZSTD_frameHeader fParams; + U64 decodedSize; + blockType_e bType; /* used in ZSTD_decompressContinue(), store blockType between block header decoding and block decompression stages */ + ZSTD_dStage stage; + U32 litEntropy; + U32 fseEntropy; + XXH64_state_t xxhState; + size_t headerSize; + U32 dictID; + ZSTD_format_e format; + const BYTE* litPtr; + ZSTD_customMem customMem; + size_t litSize; + size_t rleSize; + size_t staticSize; + int bmi2; /* == 1 if the CPU supports BMI2 and 0 otherwise. CPU support is determined dynamically once per context lifetime. 
*/ + + /* streaming */ + ZSTD_DDict* ddictLocal; + const ZSTD_DDict* ddict; + ZSTD_dStreamStage streamStage; + char* inBuff; + size_t inBuffSize; + size_t inPos; + size_t maxWindowSize; + char* outBuff; + size_t outBuffSize; + size_t outStart; + size_t outEnd; + size_t lhSize; + void* legacyContext; + U32 previousLegacyVersion; + U32 legacyVersion; + U32 hostageByte; + + /* workspace */ + BYTE litBuffer[ZSTD_BLOCKSIZE_MAX + WILDCOPY_OVERLENGTH]; + BYTE headerBuffer[ZSTD_FRAMEHEADERSIZE_MAX]; +}; /* typedef'd to ZSTD_DCtx within "zstd.h" */ + +size_t ZSTD_sizeof_DCtx (const ZSTD_DCtx* dctx) +{ + if (dctx==NULL) return 0; /* support sizeof NULL */ + return sizeof(*dctx) + + ZSTD_sizeof_DDict(dctx->ddictLocal) + + dctx->inBuffSize + dctx->outBuffSize; +} + +size_t ZSTD_estimateDCtxSize(void) { return sizeof(ZSTD_DCtx); } + + +static size_t ZSTD_startingInputLength(ZSTD_format_e format) +{ + size_t const startingInputLength = (format==ZSTD_f_zstd1_magicless) ? + ZSTD_frameHeaderSize_prefix - ZSTD_frameIdSize : + ZSTD_frameHeaderSize_prefix; + ZSTD_STATIC_ASSERT(ZSTD_FRAMEHEADERSIZE_PREFIX >= ZSTD_FRAMEIDSIZE); + /* only supports formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless */ + assert( (format == ZSTD_f_zstd1) || (format == ZSTD_f_zstd1_magicless) ); + return startingInputLength; +} + +static void ZSTD_initDCtx_internal(ZSTD_DCtx* dctx) +{ + dctx->format = ZSTD_f_zstd1; /* ZSTD_decompressBegin() invokes ZSTD_startingInputLength() with argument dctx->format */ + dctx->staticSize = 0; + dctx->maxWindowSize = ZSTD_MAXWINDOWSIZE_DEFAULT; + dctx->ddict = NULL; + dctx->ddictLocal = NULL; + dctx->inBuff = NULL; + dctx->inBuffSize = 0; + dctx->outBuffSize = 0; + dctx->streamStage = zdss_init; + dctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid()); +} + +ZSTD_DCtx* ZSTD_initStaticDCtx(void *workspace, size_t workspaceSize) +{ + ZSTD_DCtx* const dctx = (ZSTD_DCtx*) workspace; + + if ((size_t)workspace & 7) return NULL; /* 8-aligned */ + if (workspaceSize < sizeof(ZSTD_DCtx)) return 
NULL; /* minimum size */ + + ZSTD_initDCtx_internal(dctx); + dctx->staticSize = workspaceSize; + dctx->inBuff = (char*)(dctx+1); + return dctx; +} + +ZSTD_DCtx* ZSTD_createDCtx_advanced(ZSTD_customMem customMem) +{ + if (!customMem.customAlloc ^ !customMem.customFree) return NULL; + + { ZSTD_DCtx* const dctx = (ZSTD_DCtx*)ZSTD_malloc(sizeof(*dctx), customMem); + if (!dctx) return NULL; + dctx->customMem = customMem; + dctx->legacyContext = NULL; + dctx->previousLegacyVersion = 0; + ZSTD_initDCtx_internal(dctx); + return dctx; + } +} + +ZSTD_DCtx* ZSTD_createDCtx(void) +{ + DEBUGLOG(3, "ZSTD_createDCtx"); + return ZSTD_createDCtx_advanced(ZSTD_defaultCMem); +} + +size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx) +{ + if (dctx==NULL) return 0; /* support free on NULL */ + if (dctx->staticSize) return ERROR(memory_allocation); /* not compatible with static DCtx */ + { ZSTD_customMem const cMem = dctx->customMem; + ZSTD_freeDDict(dctx->ddictLocal); + dctx->ddictLocal = NULL; + ZSTD_free(dctx->inBuff, cMem); + dctx->inBuff = NULL; +#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1) + if (dctx->legacyContext) + ZSTD_freeLegacyStreamContext(dctx->legacyContext, dctx->previousLegacyVersion); +#endif + ZSTD_free(dctx, cMem); + return 0; + } +} + +/* no longer useful */ +void ZSTD_copyDCtx(ZSTD_DCtx* dstDCtx, const ZSTD_DCtx* srcDCtx) +{ + size_t const toCopy = (size_t)((char*)(&dstDCtx->inBuff) - (char*)dstDCtx); + memcpy(dstDCtx, srcDCtx, toCopy); /* no need to copy workspace */ +} + + +/*-************************************************************* + * Frame header decoding + ***************************************************************/ + +/*! ZSTD_isFrame() : + * Tells if the content of `buffer` starts with a valid Frame Identifier. + * Note : Frame Identifier is 4 bytes. If `size < 4`, @return will always be 0. + * Note 2 : Legacy Frame Identifiers are considered valid only if Legacy Support is enabled. + * Note 3 : Skippable Frame Identifiers are considered valid. 
*/ +unsigned ZSTD_isFrame(const void* buffer, size_t size) +{ + if (size < ZSTD_frameIdSize) return 0; + { U32 const magic = MEM_readLE32(buffer); + if (magic == ZSTD_MAGICNUMBER) return 1; + if ((magic & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) return 1; + } +#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1) + if (ZSTD_isLegacy(buffer, size)) return 1; +#endif + return 0; +} + +/** ZSTD_frameHeaderSize_internal() : + * srcSize must be large enough to reach header size fields. + * note : only works for formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless. + * @return : size of the Frame Header + * or an error code, which can be tested with ZSTD_isError() */ +static size_t ZSTD_frameHeaderSize_internal(const void* src, size_t srcSize, ZSTD_format_e format) +{ + size_t const minInputSize = ZSTD_startingInputLength(format); + if (srcSize < minInputSize) return ERROR(srcSize_wrong); + + { BYTE const fhd = ((const BYTE*)src)[minInputSize-1]; + U32 const dictID= fhd & 3; + U32 const singleSegment = (fhd >> 5) & 1; + U32 const fcsId = fhd >> 6; + return minInputSize + !singleSegment + + ZSTD_did_fieldSize[dictID] + ZSTD_fcs_fieldSize[fcsId] + + (singleSegment && !fcsId); + } +} + +/** ZSTD_frameHeaderSize() : + * srcSize must be >= ZSTD_frameHeaderSize_prefix. + * @return : size of the Frame Header */ +size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize) +{ + return ZSTD_frameHeaderSize_internal(src, srcSize, ZSTD_f_zstd1); +} + + +/** ZSTD_getFrameHeader_internal() : + * decode Frame Header, or require larger `srcSize`. 
+ * note : only works for formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless + * @return : 0, `zfhPtr` is correctly filled, + * >0, `srcSize` is too small, value is wanted `srcSize` amount, + * or an error code, which can be tested using ZSTD_isError() */ +static size_t ZSTD_getFrameHeader_internal(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format) +{ + const BYTE* ip = (const BYTE*)src; + size_t const minInputSize = ZSTD_startingInputLength(format); + + if (srcSize < minInputSize) return minInputSize; + + if ( (format != ZSTD_f_zstd1_magicless) + && (MEM_readLE32(src) != ZSTD_MAGICNUMBER) ) { + if ((MEM_readLE32(src) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) { + /* skippable frame */ + if (srcSize < ZSTD_skippableHeaderSize) + return ZSTD_skippableHeaderSize; /* magic number + frame length */ + memset(zfhPtr, 0, sizeof(*zfhPtr)); + zfhPtr->frameContentSize = MEM_readLE32((const char *)src + ZSTD_frameIdSize); + zfhPtr->frameType = ZSTD_skippableFrame; + return 0; + } + return ERROR(prefix_unknown); + } + + /* ensure there is enough `srcSize` to fully read/decode frame header */ + { size_t const fhsize = ZSTD_frameHeaderSize_internal(src, srcSize, format); + if (srcSize < fhsize) return fhsize; + zfhPtr->headerSize = (U32)fhsize; + } + + { BYTE const fhdByte = ip[minInputSize-1]; + size_t pos = minInputSize; + U32 const dictIDSizeCode = fhdByte&3; + U32 const checksumFlag = (fhdByte>>2)&1; + U32 const singleSegment = (fhdByte>>5)&1; + U32 const fcsID = fhdByte>>6; + U64 windowSize = 0; + U32 dictID = 0; + U64 frameContentSize = ZSTD_CONTENTSIZE_UNKNOWN; + if ((fhdByte & 0x08) != 0) + return ERROR(frameParameter_unsupported); /* reserved bits, must be zero */ + + if (!singleSegment) { + BYTE const wlByte = ip[pos++]; + U32 const windowLog = (wlByte >> 3) + ZSTD_WINDOWLOG_ABSOLUTEMIN; + if (windowLog > ZSTD_WINDOWLOG_MAX) + return ERROR(frameParameter_windowTooLarge); + windowSize = (1ULL << windowLog); + windowSize += (windowSize >> 3) 
* (wlByte&7); + } + switch(dictIDSizeCode) + { + default: assert(0); /* impossible */ + case 0 : break; + case 1 : dictID = ip[pos]; pos++; break; + case 2 : dictID = MEM_readLE16(ip+pos); pos+=2; break; + case 3 : dictID = MEM_readLE32(ip+pos); pos+=4; break; + } + switch(fcsID) + { + default: assert(0); /* impossible */ + case 0 : if (singleSegment) frameContentSize = ip[pos]; break; + case 1 : frameContentSize = MEM_readLE16(ip+pos)+256; break; + case 2 : frameContentSize = MEM_readLE32(ip+pos); break; + case 3 : frameContentSize = MEM_readLE64(ip+pos); break; + } + if (singleSegment) windowSize = frameContentSize; + + zfhPtr->frameType = ZSTD_frame; + zfhPtr->frameContentSize = frameContentSize; + zfhPtr->windowSize = windowSize; + zfhPtr->blockSizeMax = (unsigned) MIN(windowSize, ZSTD_BLOCKSIZE_MAX); + zfhPtr->dictID = dictID; + zfhPtr->checksumFlag = checksumFlag; + } + return 0; +} + +/** ZSTD_getFrameHeader() : + * decode Frame Header, or require larger `srcSize`. + * note : this function does not consume input, it only reads it. + * @return : 0, `zfhPtr` is correctly filled, + * >0, `srcSize` is too small, value is wanted `srcSize` amount, + * or an error code, which can be tested using ZSTD_isError() */ +size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize) +{ + return ZSTD_getFrameHeader_internal(zfhPtr, src, srcSize, ZSTD_f_zstd1); +} + + +/** ZSTD_getFrameContentSize() : + * compatible with legacy mode + * @return : decompressed size of the single frame pointed to be `src` if known, otherwise + * - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined + * - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. 
invalid magic number, srcSize too small) */ +unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize) +{ +#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1) + if (ZSTD_isLegacy(src, srcSize)) { + unsigned long long const ret = ZSTD_getDecompressedSize_legacy(src, srcSize); + return ret == 0 ? ZSTD_CONTENTSIZE_UNKNOWN : ret; + } +#endif + { ZSTD_frameHeader zfh; + if (ZSTD_getFrameHeader(&zfh, src, srcSize) != 0) + return ZSTD_CONTENTSIZE_ERROR; + if (zfh.frameType == ZSTD_skippableFrame) { + return 0; + } else { + return zfh.frameContentSize; + } } +} + +/** ZSTD_findDecompressedSize() : + * compatible with legacy mode + * `srcSize` must be the exact length of some number of ZSTD compressed and/or + * skippable frames + * @return : decompressed size of the frames contained */ +unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize) +{ + unsigned long long totalDstSize = 0; + + while (srcSize >= ZSTD_frameHeaderSize_prefix) { + U32 const magicNumber = MEM_readLE32(src); + + if ((magicNumber & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) { + size_t skippableSize; + if (srcSize < ZSTD_skippableHeaderSize) + return ERROR(srcSize_wrong); + skippableSize = MEM_readLE32((const BYTE *)src + ZSTD_frameIdSize) + + ZSTD_skippableHeaderSize; + if (srcSize < skippableSize) { + return ZSTD_CONTENTSIZE_ERROR; + } + + src = (const BYTE *)src + skippableSize; + srcSize -= skippableSize; + continue; + } + + { unsigned long long const ret = ZSTD_getFrameContentSize(src, srcSize); + if (ret >= ZSTD_CONTENTSIZE_ERROR) return ret; + + /* check for overflow */ + if (totalDstSize + ret < totalDstSize) return ZSTD_CONTENTSIZE_ERROR; + totalDstSize += ret; + } + { size_t const frameSrcSize = ZSTD_findFrameCompressedSize(src, srcSize); + if (ZSTD_isError(frameSrcSize)) { + return ZSTD_CONTENTSIZE_ERROR; + } + + src = (const BYTE *)src + frameSrcSize; + srcSize -= frameSrcSize; + } + } /* while (srcSize >= ZSTD_frameHeaderSize_prefix) */ + 
+ if (srcSize) return ZSTD_CONTENTSIZE_ERROR; + + return totalDstSize; +} + +/** ZSTD_getDecompressedSize() : +* compatible with legacy mode +* @return : decompressed size if known, 0 otherwise + note : 0 can mean any of the following : + - frame content is empty + - decompressed size field is not present in frame header + - frame header unknown / not supported + - frame header not complete (`srcSize` too small) */ +unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize) +{ + unsigned long long const ret = ZSTD_getFrameContentSize(src, srcSize); + ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_ERROR < ZSTD_CONTENTSIZE_UNKNOWN); + return (ret >= ZSTD_CONTENTSIZE_ERROR) ? 0 : ret; +} + + +/** ZSTD_decodeFrameHeader() : +* `headerSize` must be the size provided by ZSTD_frameHeaderSize(). +* @return : 0 if success, or an error code, which can be tested using ZSTD_isError() */ +static size_t ZSTD_decodeFrameHeader(ZSTD_DCtx* dctx, const void* src, size_t headerSize) +{ + size_t const result = ZSTD_getFrameHeader_internal(&(dctx->fParams), src, headerSize, dctx->format); + if (ZSTD_isError(result)) return result; /* invalid header */ + if (result>0) return ERROR(srcSize_wrong); /* headerSize too small */ + if (dctx->fParams.dictID && (dctx->dictID != dctx->fParams.dictID)) + return ERROR(dictionary_wrong); + if (dctx->fParams.checksumFlag) XXH64_reset(&dctx->xxhState, 0); + return 0; +} + + +/*-************************************************************* + * Block decoding + ***************************************************************/ + +/*! 
ZSTD_getcBlockSize() : +* Provides the size of compressed block from block header `src` */ +size_t ZSTD_getcBlockSize(const void* src, size_t srcSize, + blockProperties_t* bpPtr) +{ + if (srcSize < ZSTD_blockHeaderSize) return ERROR(srcSize_wrong); + { U32 const cBlockHeader = MEM_readLE24(src); + U32 const cSize = cBlockHeader >> 3; + bpPtr->lastBlock = cBlockHeader & 1; + bpPtr->blockType = (blockType_e)((cBlockHeader >> 1) & 3); + bpPtr->origSize = cSize; /* only useful for RLE */ + if (bpPtr->blockType == bt_rle) return 1; + if (bpPtr->blockType == bt_reserved) return ERROR(corruption_detected); + return cSize; + } +} + + +static size_t ZSTD_copyRawBlock(void* dst, size_t dstCapacity, + const void* src, size_t srcSize) +{ + if (srcSize > dstCapacity) return ERROR(dstSize_tooSmall); + memcpy(dst, src, srcSize); + return srcSize; +} + + +static size_t ZSTD_setRleBlock(void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + size_t regenSize) +{ + if (srcSize != 1) return ERROR(srcSize_wrong); + if (regenSize > dstCapacity) return ERROR(dstSize_tooSmall); + memset(dst, *(const BYTE*)src, regenSize); + return regenSize; +} + +/*! 
ZSTD_decodeLiteralsBlock() : + * @return : nb of bytes read from src (< srcSize ) + * note : symbol not declared but exposed for fullbench */ +size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx, + const void* src, size_t srcSize) /* note : srcSize < BLOCKSIZE */ +{ + if (srcSize < MIN_CBLOCK_SIZE) return ERROR(corruption_detected); + + { const BYTE* const istart = (const BYTE*) src; + symbolEncodingType_e const litEncType = (symbolEncodingType_e)(istart[0] & 3); + + switch(litEncType) + { + case set_repeat: + if (dctx->litEntropy==0) return ERROR(dictionary_corrupted); + /* fall-through */ + case set_compressed: + if (srcSize < 5) return ERROR(corruption_detected); /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need up to 5 for case 3 */ + { size_t lhSize, litSize, litCSize; + U32 singleStream=0; + U32 const lhlCode = (istart[0] >> 2) & 3; + U32 const lhc = MEM_readLE32(istart); + switch(lhlCode) + { + case 0: case 1: default: /* note : default is impossible, since lhlCode into [0..3] */ + /* 2 - 2 - 10 - 10 */ + singleStream = !lhlCode; + lhSize = 3; + litSize = (lhc >> 4) & 0x3FF; + litCSize = (lhc >> 14) & 0x3FF; + break; + case 2: + /* 2 - 2 - 14 - 14 */ + lhSize = 4; + litSize = (lhc >> 4) & 0x3FFF; + litCSize = lhc >> 18; + break; + case 3: + /* 2 - 2 - 18 - 18 */ + lhSize = 5; + litSize = (lhc >> 4) & 0x3FFFF; + litCSize = (lhc >> 22) + (istart[4] << 10); + break; + } + if (litSize > ZSTD_BLOCKSIZE_MAX) return ERROR(corruption_detected); + if (litCSize + lhSize > srcSize) return ERROR(corruption_detected); + + if (HUF_isError((litEncType==set_repeat) ? + ( singleStream ? + HUF_decompress1X_usingDTable_bmi2(dctx->litBuffer, litSize, istart+lhSize, litCSize, dctx->HUFptr, dctx->bmi2) : + HUF_decompress4X_usingDTable_bmi2(dctx->litBuffer, litSize, istart+lhSize, litCSize, dctx->HUFptr, dctx->bmi2) ) : + ( singleStream ? 
+ HUF_decompress1X2_DCtx_wksp_bmi2(dctx->entropy.hufTable, dctx->litBuffer, litSize, istart+lhSize, litCSize, + dctx->entropy.workspace, sizeof(dctx->entropy.workspace), dctx->bmi2) : + HUF_decompress4X_hufOnly_wksp_bmi2(dctx->entropy.hufTable, dctx->litBuffer, litSize, istart+lhSize, litCSize, + dctx->entropy.workspace, sizeof(dctx->entropy.workspace), dctx->bmi2)))) + return ERROR(corruption_detected); + + dctx->litPtr = dctx->litBuffer; + dctx->litSize = litSize; + dctx->litEntropy = 1; + if (litEncType==set_compressed) dctx->HUFptr = dctx->entropy.hufTable; + memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH); + return litCSize + lhSize; + } + + case set_basic: + { size_t litSize, lhSize; + U32 const lhlCode = ((istart[0]) >> 2) & 3; + switch(lhlCode) + { + case 0: case 2: default: /* note : default is impossible, since lhlCode into [0..3] */ + lhSize = 1; + litSize = istart[0] >> 3; + break; + case 1: + lhSize = 2; + litSize = MEM_readLE16(istart) >> 4; + break; + case 3: + lhSize = 3; + litSize = MEM_readLE24(istart) >> 4; + break; + } + + if (lhSize+litSize+WILDCOPY_OVERLENGTH > srcSize) { /* risk reading beyond src buffer with wildcopy */ + if (litSize+lhSize > srcSize) return ERROR(corruption_detected); + memcpy(dctx->litBuffer, istart+lhSize, litSize); + dctx->litPtr = dctx->litBuffer; + dctx->litSize = litSize; + memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH); + return lhSize+litSize; + } + /* direct reference into compressed stream */ + dctx->litPtr = istart+lhSize; + dctx->litSize = litSize; + return lhSize+litSize; + } + + case set_rle: + { U32 const lhlCode = ((istart[0]) >> 2) & 3; + size_t litSize, lhSize; + switch(lhlCode) + { + case 0: case 2: default: /* note : default is impossible, since lhlCode into [0..3] */ + lhSize = 1; + litSize = istart[0] >> 3; + break; + case 1: + lhSize = 2; + litSize = MEM_readLE16(istart) >> 4; + break; + case 3: + lhSize = 3; + litSize = MEM_readLE24(istart) >> 4; + if (srcSize<4) 
return ERROR(corruption_detected); /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need lhSize+1 = 4 */ + break; + } + if (litSize > ZSTD_BLOCKSIZE_MAX) return ERROR(corruption_detected); + memset(dctx->litBuffer, istart[lhSize], litSize + WILDCOPY_OVERLENGTH); + dctx->litPtr = dctx->litBuffer; + dctx->litSize = litSize; + return lhSize+1; + } + default: + return ERROR(corruption_detected); /* impossible */ + } + } +} + +/* Default FSE distribution tables. + * These are pre-calculated FSE decoding tables using default distributions as defined in specification : + * https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#default-distributions + * They were generated programmatically with following method : + * - start from default distributions, present in /lib/common/zstd_internal.h + * - generate tables normally, using ZSTD_buildFSETable() + * - printout the content of tables + * - pretify output, report below, test with fuzzer to ensure it's correct */ + +/* Default FSE distribution table for Literal Lengths */ +static const ZSTD_seqSymbol LL_defaultDTable[(1<tableLog = 0; + DTableH->fastMode = 0; + + cell->nbBits = 0; + cell->nextState = 0; + assert(nbAddBits < 255); + cell->nbAdditionalBits = (BYTE)nbAddBits; + cell->baseValue = baseValue; +} + + +/* ZSTD_buildFSETable() : + * generate FSE decoding table for one symbol (ll, ml or off) */ +static void +ZSTD_buildFSETable(ZSTD_seqSymbol* dt, + const short* normalizedCounter, unsigned maxSymbolValue, + const U32* baseValue, const U32* nbAdditionalBits, + unsigned tableLog) +{ + ZSTD_seqSymbol* const tableDecode = dt+1; + U16 symbolNext[MaxSeq+1]; + + U32 const maxSV1 = maxSymbolValue + 1; + U32 const tableSize = 1 << tableLog; + U32 highThreshold = tableSize-1; + + /* Sanity Checks */ + assert(maxSymbolValue <= MaxSeq); + assert(tableLog <= MaxFSELog); + + /* Init, lay down lowprob symbols */ + { ZSTD_seqSymbol_header DTableH; + DTableH.tableLog = tableLog; + DTableH.fastMode = 1; + { S16 
const largeLimit= (S16)(1 << (tableLog-1)); + U32 s; + for (s=0; s= largeLimit) DTableH.fastMode=0; + symbolNext[s] = normalizedCounter[s]; + } } } + memcpy(dt, &DTableH, sizeof(DTableH)); + } + + /* Spread symbols */ + { U32 const tableMask = tableSize-1; + U32 const step = FSE_TABLESTEP(tableSize); + U32 s, position = 0; + for (s=0; s highThreshold) position = (position + step) & tableMask; /* lowprob area */ + } } + assert(position == 0); /* position must reach all cells once, otherwise normalizedCounter is incorrect */ + } + + /* Build Decoding table */ + { U32 u; + for (u=0; u max) return ERROR(corruption_detected); + { U32 const symbol = *(const BYTE*)src; + U32 const baseline = baseValue[symbol]; + U32 const nbBits = nbAdditionalBits[symbol]; + ZSTD_buildSeqTable_rle(DTableSpace, baseline, nbBits); + } + *DTablePtr = DTableSpace; + return 1; + case set_basic : + *DTablePtr = defaultTable; + return 0; + case set_repeat: + if (!flagRepeatTable) return ERROR(corruption_detected); + return 0; + case set_compressed : + { U32 tableLog; + S16 norm[MaxSeq+1]; + size_t const headerSize = FSE_readNCount(norm, &max, &tableLog, src, srcSize); + if (FSE_isError(headerSize)) return ERROR(corruption_detected); + if (tableLog > maxLog) return ERROR(corruption_detected); + ZSTD_buildFSETable(DTableSpace, norm, max, baseValue, nbAdditionalBits, tableLog); + *DTablePtr = DTableSpace; + return headerSize; + } + default : /* impossible */ + assert(0); + return ERROR(GENERIC); + } +} + +static const U32 LL_base[MaxLL+1] = { + 0, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, + 16, 18, 20, 22, 24, 28, 32, 40, + 48, 64, 0x80, 0x100, 0x200, 0x400, 0x800, 0x1000, + 0x2000, 0x4000, 0x8000, 0x10000 }; + +static const U32 OF_base[MaxOff+1] = { + 0, 1, 1, 5, 0xD, 0x1D, 0x3D, 0x7D, + 0xFD, 0x1FD, 0x3FD, 0x7FD, 0xFFD, 0x1FFD, 0x3FFD, 0x7FFD, + 0xFFFD, 0x1FFFD, 0x3FFFD, 0x7FFFD, 0xFFFFD, 0x1FFFFD, 0x3FFFFD, 0x7FFFFD, + 0xFFFFFD, 0x1FFFFFD, 0x3FFFFFD, 0x7FFFFFD, 0xFFFFFFD, 0x1FFFFFFD, 
0x3FFFFFFD, 0x7FFFFFFD }; + +static const U32 OF_bits[MaxOff+1] = { + 0, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 26, 27, 28, 29, 30, 31 }; + +static const U32 ML_base[MaxML+1] = { + 3, 4, 5, 6, 7, 8, 9, 10, + 11, 12, 13, 14, 15, 16, 17, 18, + 19, 20, 21, 22, 23, 24, 25, 26, + 27, 28, 29, 30, 31, 32, 33, 34, + 35, 37, 39, 41, 43, 47, 51, 59, + 67, 83, 99, 0x83, 0x103, 0x203, 0x403, 0x803, + 0x1003, 0x2003, 0x4003, 0x8003, 0x10003 }; + + +size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr, + const void* src, size_t srcSize) +{ + const BYTE* const istart = (const BYTE* const)src; + const BYTE* const iend = istart + srcSize; + const BYTE* ip = istart; + DEBUGLOG(5, "ZSTD_decodeSeqHeaders"); + + /* check */ + if (srcSize < MIN_SEQUENCES_SIZE) return ERROR(srcSize_wrong); + + /* SeqHead */ + { int nbSeq = *ip++; + if (!nbSeq) { *nbSeqPtr=0; return 1; } + if (nbSeq > 0x7F) { + if (nbSeq == 0xFF) { + if (ip+2 > iend) return ERROR(srcSize_wrong); + nbSeq = MEM_readLE16(ip) + LONGNBSEQ, ip+=2; + } else { + if (ip >= iend) return ERROR(srcSize_wrong); + nbSeq = ((nbSeq-0x80)<<8) + *ip++; + } + } + *nbSeqPtr = nbSeq; + } + + /* FSE table descriptors */ + if (ip+4 > iend) return ERROR(srcSize_wrong); /* minimum possible size */ + { symbolEncodingType_e const LLtype = (symbolEncodingType_e)(*ip >> 6); + symbolEncodingType_e const OFtype = (symbolEncodingType_e)((*ip >> 4) & 3); + symbolEncodingType_e const MLtype = (symbolEncodingType_e)((*ip >> 2) & 3); + ip++; + + /* Build DTables */ + { size_t const llhSize = ZSTD_buildSeqTable(dctx->entropy.LLTable, &dctx->LLTptr, + LLtype, MaxLL, LLFSELog, + ip, iend-ip, + LL_base, LL_bits, + LL_defaultDTable, dctx->fseEntropy); + if (ZSTD_isError(llhSize)) return ERROR(corruption_detected); + ip += llhSize; + } + + { size_t const ofhSize = ZSTD_buildSeqTable(dctx->entropy.OFTable, &dctx->OFTptr, + OFtype, MaxOff, OffFSELog, + ip, iend-ip, + OF_base, OF_bits, + 
OF_defaultDTable, dctx->fseEntropy); + if (ZSTD_isError(ofhSize)) return ERROR(corruption_detected); + ip += ofhSize; + } + + { size_t const mlhSize = ZSTD_buildSeqTable(dctx->entropy.MLTable, &dctx->MLTptr, + MLtype, MaxML, MLFSELog, + ip, iend-ip, + ML_base, ML_bits, + ML_defaultDTable, dctx->fseEntropy); + if (ZSTD_isError(mlhSize)) return ERROR(corruption_detected); + ip += mlhSize; + } + } + + return ip-istart; +} + + +typedef struct { + size_t litLength; + size_t matchLength; + size_t offset; + const BYTE* match; +} seq_t; + +typedef struct { + size_t state; + const ZSTD_seqSymbol* table; +} ZSTD_fseState; + +typedef struct { + BIT_DStream_t DStream; + ZSTD_fseState stateLL; + ZSTD_fseState stateOffb; + ZSTD_fseState stateML; + size_t prevOffset[ZSTD_REP_NUM]; + const BYTE* prefixStart; + const BYTE* dictEnd; + size_t pos; +} seqState_t; + + +FORCE_NOINLINE +size_t ZSTD_execSequenceLast7(BYTE* op, + BYTE* const oend, seq_t sequence, + const BYTE** litPtr, const BYTE* const litLimit, + const BYTE* const base, const BYTE* const vBase, const BYTE* const dictEnd) +{ + BYTE* const oLitEnd = op + sequence.litLength; + size_t const sequenceLength = sequence.litLength + sequence.matchLength; + BYTE* const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */ + BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH; + const BYTE* const iLitEnd = *litPtr + sequence.litLength; + const BYTE* match = oLitEnd - sequence.offset; + + /* check */ + if (oMatchEnd>oend) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */ + if (iLitEnd > litLimit) return ERROR(corruption_detected); /* over-read beyond lit buffer */ + if (oLitEnd <= oend_w) return ERROR(GENERIC); /* Precondition */ + + /* copy literals */ + if (op < oend_w) { + ZSTD_wildcopy(op, *litPtr, oend_w - op); + *litPtr += oend_w - op; + op = oend_w; + } + while (op < oLitEnd) *op++ = *(*litPtr)++; + + /* copy Match */ + if (sequence.offset > 
(size_t)(oLitEnd - base)) { + /* offset beyond prefix */ + if (sequence.offset > (size_t)(oLitEnd - vBase)) return ERROR(corruption_detected); + match = dictEnd - (base-match); + if (match + sequence.matchLength <= dictEnd) { + memmove(oLitEnd, match, sequence.matchLength); + return sequenceLength; + } + /* span extDict & currentPrefixSegment */ + { size_t const length1 = dictEnd - match; + memmove(oLitEnd, match, length1); + op = oLitEnd + length1; + sequence.matchLength -= length1; + match = base; + } } + while (op < oMatchEnd) *op++ = *match++; + return sequenceLength; +} + + +HINT_INLINE +size_t ZSTD_execSequence(BYTE* op, + BYTE* const oend, seq_t sequence, + const BYTE** litPtr, const BYTE* const litLimit, + const BYTE* const base, const BYTE* const vBase, const BYTE* const dictEnd) +{ + BYTE* const oLitEnd = op + sequence.litLength; + size_t const sequenceLength = sequence.litLength + sequence.matchLength; + BYTE* const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */ + BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH; + const BYTE* const iLitEnd = *litPtr + sequence.litLength; + const BYTE* match = oLitEnd - sequence.offset; + + /* check */ + if (oMatchEnd>oend) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */ + if (iLitEnd > litLimit) return ERROR(corruption_detected); /* over-read beyond lit buffer */ + if (oLitEnd>oend_w) return ZSTD_execSequenceLast7(op, oend, sequence, litPtr, litLimit, base, vBase, dictEnd); + + /* copy Literals */ + ZSTD_copy8(op, *litPtr); + if (sequence.litLength > 8) + ZSTD_wildcopy(op+8, (*litPtr)+8, sequence.litLength - 8); /* note : since oLitEnd <= oend-WILDCOPY_OVERLENGTH, no risk of overwrite beyond oend */ + op = oLitEnd; + *litPtr = iLitEnd; /* update for next sequence */ + + /* copy Match */ + if (sequence.offset > (size_t)(oLitEnd - base)) { + /* offset beyond prefix -> go into extDict */ + if (sequence.offset > 
(size_t)(oLitEnd - vBase)) + return ERROR(corruption_detected); + match = dictEnd + (match - base); + if (match + sequence.matchLength <= dictEnd) { + memmove(oLitEnd, match, sequence.matchLength); + return sequenceLength; + } + /* span extDict & currentPrefixSegment */ + { size_t const length1 = dictEnd - match; + memmove(oLitEnd, match, length1); + op = oLitEnd + length1; + sequence.matchLength -= length1; + match = base; + if (op > oend_w || sequence.matchLength < MINMATCH) { + U32 i; + for (i = 0; i < sequence.matchLength; ++i) op[i] = match[i]; + return sequenceLength; + } + } } + /* Requirement: op <= oend_w && sequence.matchLength >= MINMATCH */ + + /* match within prefix */ + if (sequence.offset < 8) { + /* close range match, overlap */ + static const U32 dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 }; /* added */ + static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 }; /* subtracted */ + int const sub2 = dec64table[sequence.offset]; + op[0] = match[0]; + op[1] = match[1]; + op[2] = match[2]; + op[3] = match[3]; + match += dec32table[sequence.offset]; + ZSTD_copy4(op+4, match); + match -= sub2; + } else { + ZSTD_copy8(op, match); + } + op += 8; match += 8; + + if (oMatchEnd > oend-(16-MINMATCH)) { + if (op < oend_w) { + ZSTD_wildcopy(op, match, oend_w - op); + match += oend_w - op; + op = oend_w; + } + while (op < oMatchEnd) *op++ = *match++; + } else { + ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8); /* works even if matchLength < 8 */ + } + return sequenceLength; +} + + +HINT_INLINE +size_t ZSTD_execSequenceLong(BYTE* op, + BYTE* const oend, seq_t sequence, + const BYTE** litPtr, const BYTE* const litLimit, + const BYTE* const prefixStart, const BYTE* const dictStart, const BYTE* const dictEnd) +{ + BYTE* const oLitEnd = op + sequence.litLength; + size_t const sequenceLength = sequence.litLength + sequence.matchLength; + BYTE* const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */ + BYTE* const oend_w = oend - 
WILDCOPY_OVERLENGTH; + const BYTE* const iLitEnd = *litPtr + sequence.litLength; + const BYTE* match = sequence.match; + + /* check */ + if (oMatchEnd > oend) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */ + if (iLitEnd > litLimit) return ERROR(corruption_detected); /* over-read beyond lit buffer */ + if (oLitEnd > oend_w) return ZSTD_execSequenceLast7(op, oend, sequence, litPtr, litLimit, prefixStart, dictStart, dictEnd); + + /* copy Literals */ + ZSTD_copy8(op, *litPtr); /* note : op <= oLitEnd <= oend_w == oend - 8 */ + if (sequence.litLength > 8) + ZSTD_wildcopy(op+8, (*litPtr)+8, sequence.litLength - 8); /* note : since oLitEnd <= oend-WILDCOPY_OVERLENGTH, no risk of overwrite beyond oend */ + op = oLitEnd; + *litPtr = iLitEnd; /* update for next sequence */ + + /* copy Match */ + if (sequence.offset > (size_t)(oLitEnd - prefixStart)) { + /* offset beyond prefix */ + if (sequence.offset > (size_t)(oLitEnd - dictStart)) return ERROR(corruption_detected); + if (match + sequence.matchLength <= dictEnd) { + memmove(oLitEnd, match, sequence.matchLength); + return sequenceLength; + } + /* span extDict & currentPrefixSegment */ + { size_t const length1 = dictEnd - match; + memmove(oLitEnd, match, length1); + op = oLitEnd + length1; + sequence.matchLength -= length1; + match = prefixStart; + if (op > oend_w || sequence.matchLength < MINMATCH) { + U32 i; + for (i = 0; i < sequence.matchLength; ++i) op[i] = match[i]; + return sequenceLength; + } + } } + assert(op <= oend_w); + assert(sequence.matchLength >= MINMATCH); + + /* match within prefix */ + if (sequence.offset < 8) { + /* close range match, overlap */ + static const U32 dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 }; /* added */ + static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 }; /* subtracted */ + int const sub2 = dec64table[sequence.offset]; + op[0] = match[0]; + op[1] = match[1]; + op[2] = match[2]; + op[3] = match[3]; + match += 
dec32table[sequence.offset]; + ZSTD_copy4(op+4, match); + match -= sub2; + } else { + ZSTD_copy8(op, match); + } + op += 8; match += 8; + + if (oMatchEnd > oend-(16-MINMATCH)) { + if (op < oend_w) { + ZSTD_wildcopy(op, match, oend_w - op); + match += oend_w - op; + op = oend_w; + } + while (op < oMatchEnd) *op++ = *match++; + } else { + ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8); /* works even if matchLength < 8 */ + } + return sequenceLength; +} + +static void +ZSTD_initFseState(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD, const ZSTD_seqSymbol* dt) +{ + const void* ptr = dt; + const ZSTD_seqSymbol_header* const DTableH = (const ZSTD_seqSymbol_header*)ptr; + DStatePtr->state = BIT_readBits(bitD, DTableH->tableLog); + DEBUGLOG(6, "ZSTD_initFseState : val=%u using %u bits", + (U32)DStatePtr->state, DTableH->tableLog); + BIT_reloadDStream(bitD); + DStatePtr->table = dt + 1; +} + +FORCE_INLINE_TEMPLATE void +ZSTD_updateFseState(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD) +{ + ZSTD_seqSymbol const DInfo = DStatePtr->table[DStatePtr->state]; + U32 const nbBits = DInfo.nbBits; + size_t const lowBits = BIT_readBits(bitD, nbBits); + DStatePtr->state = DInfo.nextState + lowBits; +} + +/* We need to add at most (ZSTD_WINDOWLOG_MAX_32 - 1) bits to read the maximum + * offset bits. But we can only read at most (STREAM_ACCUMULATOR_MIN_32 - 1) + * bits before reloading. This value is the maximum number of bytes we read + * after reloading when we are decoding long offets. + */ +#define LONG_OFFSETS_MAX_EXTRA_BITS_32 \ + (ZSTD_WINDOWLOG_MAX_32 > STREAM_ACCUMULATOR_MIN_32 \ + ? 
ZSTD_WINDOWLOG_MAX_32 - STREAM_ACCUMULATOR_MIN_32 \ + : 0) + +typedef enum { ZSTD_lo_isRegularOffset, ZSTD_lo_isLongOffset=1 } ZSTD_longOffset_e; + +FORCE_INLINE_TEMPLATE seq_t +ZSTD_decodeSequence(seqState_t* seqState, const ZSTD_longOffset_e longOffsets) +{ + seq_t seq; + U32 const llBits = seqState->stateLL.table[seqState->stateLL.state].nbAdditionalBits; + U32 const mlBits = seqState->stateML.table[seqState->stateML.state].nbAdditionalBits; + U32 const ofBits = seqState->stateOffb.table[seqState->stateOffb.state].nbAdditionalBits; + U32 const totalBits = llBits+mlBits+ofBits; + U32 const llBase = seqState->stateLL.table[seqState->stateLL.state].baseValue; + U32 const mlBase = seqState->stateML.table[seqState->stateML.state].baseValue; + U32 const ofBase = seqState->stateOffb.table[seqState->stateOffb.state].baseValue; + + /* sequence */ + { size_t offset; + if (!ofBits) + offset = 0; + else { + ZSTD_STATIC_ASSERT(ZSTD_lo_isLongOffset == 1); + ZSTD_STATIC_ASSERT(LONG_OFFSETS_MAX_EXTRA_BITS_32 == 5); + assert(ofBits <= MaxOff); + if (MEM_32bits() && longOffsets && (ofBits >= STREAM_ACCUMULATOR_MIN_32)) { + U32 const extraBits = ofBits - MIN(ofBits, 32 - seqState->DStream.bitsConsumed); + offset = ofBase + (BIT_readBitsFast(&seqState->DStream, ofBits - extraBits) << extraBits); + BIT_reloadDStream(&seqState->DStream); + if (extraBits) offset += BIT_readBitsFast(&seqState->DStream, extraBits); + assert(extraBits <= LONG_OFFSETS_MAX_EXTRA_BITS_32); /* to avoid another reload */ + } else { + offset = ofBase + BIT_readBitsFast(&seqState->DStream, ofBits/*>0*/); /* <= (ZSTD_WINDOWLOG_MAX-1) bits */ + if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream); + } + } + + if (ofBits <= 1) { + offset += (llBase==0); + if (offset) { + size_t temp = (offset==3) ? 
seqState->prevOffset[0] - 1 : seqState->prevOffset[offset]; + temp += !temp; /* 0 is not valid; input is corrupted; force offset to 1 */ + if (offset != 1) seqState->prevOffset[2] = seqState->prevOffset[1]; + seqState->prevOffset[1] = seqState->prevOffset[0]; + seqState->prevOffset[0] = offset = temp; + } else { /* offset == 0 */ + offset = seqState->prevOffset[0]; + } + } else { + seqState->prevOffset[2] = seqState->prevOffset[1]; + seqState->prevOffset[1] = seqState->prevOffset[0]; + seqState->prevOffset[0] = offset; + } + seq.offset = offset; + } + + seq.matchLength = mlBase + + ((mlBits>0) ? BIT_readBitsFast(&seqState->DStream, mlBits/*>0*/) : 0); /* <= 16 bits */ + if (MEM_32bits() && (mlBits+llBits >= STREAM_ACCUMULATOR_MIN_32-LONG_OFFSETS_MAX_EXTRA_BITS_32)) + BIT_reloadDStream(&seqState->DStream); + if (MEM_64bits() && (totalBits >= STREAM_ACCUMULATOR_MIN_64-(LLFSELog+MLFSELog+OffFSELog))) + BIT_reloadDStream(&seqState->DStream); + /* Ensure there are enough bits to read the rest of data in 64-bit mode. */ + ZSTD_STATIC_ASSERT(16+LLFSELog+MLFSELog+OffFSELog < STREAM_ACCUMULATOR_MIN_64); + + seq.litLength = llBase + + ((llBits>0) ? 
BIT_readBitsFast(&seqState->DStream, llBits/*>0*/) : 0); /* <= 16 bits */ + if (MEM_32bits()) + BIT_reloadDStream(&seqState->DStream); + + DEBUGLOG(6, "seq: litL=%u, matchL=%u, offset=%u", + (U32)seq.litLength, (U32)seq.matchLength, (U32)seq.offset); + + /* ANS state update */ + ZSTD_updateFseState(&seqState->stateLL, &seqState->DStream); /* <= 9 bits */ + ZSTD_updateFseState(&seqState->stateML, &seqState->DStream); /* <= 9 bits */ + if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream); /* <= 18 bits */ + ZSTD_updateFseState(&seqState->stateOffb, &seqState->DStream); /* <= 8 bits */ + + return seq; +} + +FORCE_INLINE_TEMPLATE size_t +ZSTD_decompressSequences_body( ZSTD_DCtx* dctx, + void* dst, size_t maxDstSize, + const void* seqStart, size_t seqSize, int nbSeq, + const ZSTD_longOffset_e isLongOffset) +{ + const BYTE* ip = (const BYTE*)seqStart; + const BYTE* const iend = ip + seqSize; + BYTE* const ostart = (BYTE* const)dst; + BYTE* const oend = ostart + maxDstSize; + BYTE* op = ostart; + const BYTE* litPtr = dctx->litPtr; + const BYTE* const litEnd = litPtr + dctx->litSize; + const BYTE* const base = (const BYTE*) (dctx->base); + const BYTE* const vBase = (const BYTE*) (dctx->vBase); + const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd); + DEBUGLOG(5, "ZSTD_decompressSequences"); + + /* Regen sequences */ + if (nbSeq) { + seqState_t seqState; + dctx->fseEntropy = 1; + { U32 i; for (i=0; ientropy.rep[i]; } + CHECK_E(BIT_initDStream(&seqState.DStream, ip, iend-ip), corruption_detected); + ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr); + ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr); + ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr); + + for ( ; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && nbSeq ; ) { + nbSeq--; + { seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset); + size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, 
litEnd, base, vBase, dictEnd); + DEBUGLOG(6, "regenerated sequence size : %u", (U32)oneSeqSize); + if (ZSTD_isError(oneSeqSize)) return oneSeqSize; + op += oneSeqSize; + } } + + /* check if reached exact end */ + DEBUGLOG(5, "ZSTD_decompressSequences: after decode loop, remaining nbSeq : %i", nbSeq); + if (nbSeq) return ERROR(corruption_detected); + /* save reps for next block */ + { U32 i; for (i=0; ientropy.rep[i] = (U32)(seqState.prevOffset[i]); } + } + + /* last literal segment */ + { size_t const lastLLSize = litEnd - litPtr; + if (lastLLSize > (size_t)(oend-op)) return ERROR(dstSize_tooSmall); + memcpy(op, litPtr, lastLLSize); + op += lastLLSize; + } + + return op-ostart; +} + +static size_t +ZSTD_decompressSequences_default(ZSTD_DCtx* dctx, + void* dst, size_t maxDstSize, + const void* seqStart, size_t seqSize, int nbSeq, + const ZSTD_longOffset_e isLongOffset) +{ + return ZSTD_decompressSequences_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset); +} + + + +FORCE_INLINE_TEMPLATE seq_t +ZSTD_decodeSequenceLong(seqState_t* seqState, ZSTD_longOffset_e const longOffsets) +{ + seq_t seq; + U32 const llBits = seqState->stateLL.table[seqState->stateLL.state].nbAdditionalBits; + U32 const mlBits = seqState->stateML.table[seqState->stateML.state].nbAdditionalBits; + U32 const ofBits = seqState->stateOffb.table[seqState->stateOffb.state].nbAdditionalBits; + U32 const totalBits = llBits+mlBits+ofBits; + U32 const llBase = seqState->stateLL.table[seqState->stateLL.state].baseValue; + U32 const mlBase = seqState->stateML.table[seqState->stateML.state].baseValue; + U32 const ofBase = seqState->stateOffb.table[seqState->stateOffb.state].baseValue; + + /* sequence */ + { size_t offset; + if (!ofBits) + offset = 0; + else { + ZSTD_STATIC_ASSERT(ZSTD_lo_isLongOffset == 1); + ZSTD_STATIC_ASSERT(LONG_OFFSETS_MAX_EXTRA_BITS_32 == 5); + assert(ofBits <= MaxOff); + if (MEM_32bits() && longOffsets) { + U32 const extraBits = ofBits - MIN(ofBits, 
STREAM_ACCUMULATOR_MIN_32-1); + offset = ofBase + (BIT_readBitsFast(&seqState->DStream, ofBits - extraBits) << extraBits); + if (MEM_32bits() || extraBits) BIT_reloadDStream(&seqState->DStream); + if (extraBits) offset += BIT_readBitsFast(&seqState->DStream, extraBits); + } else { + offset = ofBase + BIT_readBitsFast(&seqState->DStream, ofBits); /* <= (ZSTD_WINDOWLOG_MAX-1) bits */ + if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream); + } + } + + if (ofBits <= 1) { + offset += (llBase==0); + if (offset) { + size_t temp = (offset==3) ? seqState->prevOffset[0] - 1 : seqState->prevOffset[offset]; + temp += !temp; /* 0 is not valid; input is corrupted; force offset to 1 */ + if (offset != 1) seqState->prevOffset[2] = seqState->prevOffset[1]; + seqState->prevOffset[1] = seqState->prevOffset[0]; + seqState->prevOffset[0] = offset = temp; + } else { + offset = seqState->prevOffset[0]; + } + } else { + seqState->prevOffset[2] = seqState->prevOffset[1]; + seqState->prevOffset[1] = seqState->prevOffset[0]; + seqState->prevOffset[0] = offset; + } + seq.offset = offset; + } + + seq.matchLength = mlBase + ((mlBits>0) ? BIT_readBitsFast(&seqState->DStream, mlBits) : 0); /* <= 16 bits */ + if (MEM_32bits() && (mlBits+llBits >= STREAM_ACCUMULATOR_MIN_32-LONG_OFFSETS_MAX_EXTRA_BITS_32)) + BIT_reloadDStream(&seqState->DStream); + if (MEM_64bits() && (totalBits >= STREAM_ACCUMULATOR_MIN_64-(LLFSELog+MLFSELog+OffFSELog))) + BIT_reloadDStream(&seqState->DStream); + /* Verify that there is enough bits to read the rest of the data in 64-bit mode. */ + ZSTD_STATIC_ASSERT(16+LLFSELog+MLFSELog+OffFSELog < STREAM_ACCUMULATOR_MIN_64); + + seq.litLength = llBase + ((llBits>0) ? BIT_readBitsFast(&seqState->DStream, llBits) : 0); /* <= 16 bits */ + if (MEM_32bits()) + BIT_reloadDStream(&seqState->DStream); + + { size_t const pos = seqState->pos + seq.litLength; + const BYTE* const matchBase = (seq.offset > pos) ? 
seqState->dictEnd : seqState->prefixStart; + seq.match = matchBase + pos - seq.offset; /* note : this operation can overflow when seq.offset is really too large, which can only happen when input is corrupted. + * No consequence though : no memory access will occur, overly large offset will be detected in ZSTD_execSequenceLong() */ + seqState->pos = pos + seq.matchLength; + } + + /* ANS state update */ + ZSTD_updateFseState(&seqState->stateLL, &seqState->DStream); /* <= 9 bits */ + ZSTD_updateFseState(&seqState->stateML, &seqState->DStream); /* <= 9 bits */ + if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream); /* <= 18 bits */ + ZSTD_updateFseState(&seqState->stateOffb, &seqState->DStream); /* <= 8 bits */ + + return seq; +} + +FORCE_INLINE_TEMPLATE size_t +ZSTD_decompressSequencesLong_body( + ZSTD_DCtx* dctx, + void* dst, size_t maxDstSize, + const void* seqStart, size_t seqSize, int nbSeq, + const ZSTD_longOffset_e isLongOffset) +{ + const BYTE* ip = (const BYTE*)seqStart; + const BYTE* const iend = ip + seqSize; + BYTE* const ostart = (BYTE* const)dst; + BYTE* const oend = ostart + maxDstSize; + BYTE* op = ostart; + const BYTE* litPtr = dctx->litPtr; + const BYTE* const litEnd = litPtr + dctx->litSize; + const BYTE* const prefixStart = (const BYTE*) (dctx->base); + const BYTE* const dictStart = (const BYTE*) (dctx->vBase); + const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd); + + /* Regen sequences */ + if (nbSeq) { +#define STORED_SEQS 4 +#define STOSEQ_MASK (STORED_SEQS-1) +#define ADVANCED_SEQS 4 + seq_t sequences[STORED_SEQS]; + int const seqAdvance = MIN(nbSeq, ADVANCED_SEQS); + seqState_t seqState; + int seqNb; + dctx->fseEntropy = 1; + { U32 i; for (i=0; ientropy.rep[i]; } + seqState.prefixStart = prefixStart; + seqState.pos = (size_t)(op-prefixStart); + seqState.dictEnd = dictEnd; + CHECK_E(BIT_initDStream(&seqState.DStream, ip, iend-ip), corruption_detected); + ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr); + 
ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr); + ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr); + + /* prepare in advance */ + for (seqNb=0; (BIT_reloadDStream(&seqState.DStream) <= BIT_DStream_completed) && (seqNbentropy.rep[i] = (U32)(seqState.prevOffset[i]); } +#undef STORED_SEQS +#undef STOSEQ_MASK +#undef ADVANCED_SEQS + } + + /* last literal segment */ + { size_t const lastLLSize = litEnd - litPtr; + if (lastLLSize > (size_t)(oend-op)) return ERROR(dstSize_tooSmall); + memcpy(op, litPtr, lastLLSize); + op += lastLLSize; + } + + return op-ostart; +} + +static size_t +ZSTD_decompressSequencesLong_default(ZSTD_DCtx* dctx, + void* dst, size_t maxDstSize, + const void* seqStart, size_t seqSize, int nbSeq, + const ZSTD_longOffset_e isLongOffset) +{ + return ZSTD_decompressSequencesLong_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset); +} + + + +#if DYNAMIC_BMI2 + +static TARGET_ATTRIBUTE("bmi2") size_t +ZSTD_decompressSequences_bmi2(ZSTD_DCtx* dctx, + void* dst, size_t maxDstSize, + const void* seqStart, size_t seqSize, int nbSeq, + const ZSTD_longOffset_e isLongOffset) +{ + return ZSTD_decompressSequences_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset); +} + +static TARGET_ATTRIBUTE("bmi2") size_t +ZSTD_decompressSequencesLong_bmi2(ZSTD_DCtx* dctx, + void* dst, size_t maxDstSize, + const void* seqStart, size_t seqSize, int nbSeq, + const ZSTD_longOffset_e isLongOffset) +{ + return ZSTD_decompressSequencesLong_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset); +} + +#endif + +typedef size_t (*ZSTD_decompressSequences_t)( + ZSTD_DCtx *dctx, void *dst, size_t maxDstSize, + const void *seqStart, size_t seqSize, int nbSeq, + const ZSTD_longOffset_e isLongOffset); + +static size_t ZSTD_decompressSequences(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize, + const void* seqStart, size_t seqSize, int nbSeq, + const ZSTD_longOffset_e isLongOffset) +{ + DEBUGLOG(5, 
"ZSTD_decompressSequences"); +#if DYNAMIC_BMI2 + if (dctx->bmi2) { + return ZSTD_decompressSequences_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset); + } +#endif + return ZSTD_decompressSequences_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset); +} + +static size_t ZSTD_decompressSequencesLong(ZSTD_DCtx* dctx, + void* dst, size_t maxDstSize, + const void* seqStart, size_t seqSize, int nbSeq, + const ZSTD_longOffset_e isLongOffset) +{ + DEBUGLOG(5, "ZSTD_decompressSequencesLong"); +#if DYNAMIC_BMI2 + if (dctx->bmi2) { + return ZSTD_decompressSequencesLong_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset); + } +#endif + return ZSTD_decompressSequencesLong_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset); +} + +/* ZSTD_getLongOffsetsShare() : + * condition : offTable must be valid + * @return : "share" of long offsets (arbitrarily defined as > (1<<23)) + * compared to maximum possible of (1< 22) total += 1; + } + + assert(tableLog <= OffFSELog); + total <<= (OffFSELog - tableLog); /* scale to OffFSELog */ + + return total; +} + + +static size_t ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, const int frame) +{ /* blockType == blockCompressed */ + const BYTE* ip = (const BYTE*)src; + /* isLongOffset must be true if there are long offsets. + * Offsets are long if they are larger than 2^STREAM_ACCUMULATOR_MIN. + * We don't expect that to be the case in 64-bit mode. + * In block mode, window size is not known, so we have to be conservative. 
(note: but it could be evaluated from current-lowLimit) + */ + ZSTD_longOffset_e const isLongOffset = (ZSTD_longOffset_e)(MEM_32bits() && (!frame || dctx->fParams.windowSize > (1ULL << STREAM_ACCUMULATOR_MIN))); + DEBUGLOG(5, "ZSTD_decompressBlock_internal (size : %u)", (U32)srcSize); + + if (srcSize >= ZSTD_BLOCKSIZE_MAX) return ERROR(srcSize_wrong); + + /* Decode literals section */ + { size_t const litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize); + DEBUGLOG(5, "ZSTD_decodeLiteralsBlock : %u", (U32)litCSize); + if (ZSTD_isError(litCSize)) return litCSize; + ip += litCSize; + srcSize -= litCSize; + } + + /* Build Decoding Tables */ + { int nbSeq; + size_t const seqHSize = ZSTD_decodeSeqHeaders(dctx, &nbSeq, ip, srcSize); + if (ZSTD_isError(seqHSize)) return seqHSize; + ip += seqHSize; + srcSize -= seqHSize; + + if ( (!frame || dctx->fParams.windowSize > (1<<24)) + && (nbSeq>0) ) { /* could probably use a larger nbSeq limit */ + U32 const shareLongOffsets = ZSTD_getLongOffsetsShare(dctx->OFTptr); + U32 const minShare = MEM_64bits() ? 
7 : 20; /* heuristic values, correspond to 2.73% and 7.81% */ + if (shareLongOffsets >= minShare) + return ZSTD_decompressSequencesLong(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset); + } + + return ZSTD_decompressSequences(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset); + } +} + + +static void ZSTD_checkContinuity(ZSTD_DCtx* dctx, const void* dst) +{ + if (dst != dctx->previousDstEnd) { /* not contiguous */ + dctx->dictEnd = dctx->previousDstEnd; + dctx->vBase = (const char*)dst - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->base)); + dctx->base = dst; + dctx->previousDstEnd = dst; + } +} + +size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize) +{ + size_t dSize; + ZSTD_checkContinuity(dctx, dst); + dSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, /* frame */ 0); + dctx->previousDstEnd = (char*)dst + dSize; + return dSize; +} + + +/** ZSTD_insertBlock() : + insert `src` block into `dctx` history. Useful to track uncompressed blocks. 
*/ +ZSTDLIB_API size_t ZSTD_insertBlock(ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize) +{ + ZSTD_checkContinuity(dctx, blockStart); + dctx->previousDstEnd = (const char*)blockStart + blockSize; + return blockSize; +} + + +static size_t ZSTD_generateNxBytes(void* dst, size_t dstCapacity, BYTE byte, size_t length) +{ + if (length > dstCapacity) return ERROR(dstSize_tooSmall); + memset(dst, byte, length); + return length; +} + +/** ZSTD_findFrameCompressedSize() : + * compatible with legacy mode + * `src` must point to the start of a ZSTD frame, ZSTD legacy frame, or skippable frame + * `srcSize` must be at least as large as the frame contained + * @return : the compressed size of the frame starting at `src` */ +size_t ZSTD_findFrameCompressedSize(const void *src, size_t srcSize) +{ +#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1) + if (ZSTD_isLegacy(src, srcSize)) + return ZSTD_findFrameCompressedSizeLegacy(src, srcSize); +#endif + if ( (srcSize >= ZSTD_skippableHeaderSize) + && (MEM_readLE32(src) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START ) { + return ZSTD_skippableHeaderSize + MEM_readLE32((const BYTE*)src + ZSTD_frameIdSize); + } else { + const BYTE* ip = (const BYTE*)src; + const BYTE* const ipstart = ip; + size_t remainingSize = srcSize; + ZSTD_frameHeader zfh; + + /* Extract Frame Header */ + { size_t const ret = ZSTD_getFrameHeader(&zfh, src, srcSize); + if (ZSTD_isError(ret)) return ret; + if (ret > 0) return ERROR(srcSize_wrong); + } + + ip += zfh.headerSize; + remainingSize -= zfh.headerSize; + + /* Loop on each block */ + while (1) { + blockProperties_t blockProperties; + size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties); + if (ZSTD_isError(cBlockSize)) return cBlockSize; + + if (ZSTD_blockHeaderSize + cBlockSize > remainingSize) + return ERROR(srcSize_wrong); + + ip += ZSTD_blockHeaderSize + cBlockSize; + remainingSize -= ZSTD_blockHeaderSize + cBlockSize; + + if (blockProperties.lastBlock) 
break; + } + + if (zfh.checksumFlag) { /* Final frame content checksum */ + if (remainingSize < 4) return ERROR(srcSize_wrong); + ip += 4; + remainingSize -= 4; + } + + return ip - ipstart; + } +} + +/*! ZSTD_decompressFrame() : +* @dctx must be properly initialized */ +static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx, + void* dst, size_t dstCapacity, + const void** srcPtr, size_t *srcSizePtr) +{ + const BYTE* ip = (const BYTE*)(*srcPtr); + BYTE* const ostart = (BYTE* const)dst; + BYTE* const oend = ostart + dstCapacity; + BYTE* op = ostart; + size_t remainingSize = *srcSizePtr; + + /* check */ + if (remainingSize < ZSTD_frameHeaderSize_min+ZSTD_blockHeaderSize) + return ERROR(srcSize_wrong); + + /* Frame Header */ + { size_t const frameHeaderSize = ZSTD_frameHeaderSize(ip, ZSTD_frameHeaderSize_prefix); + if (ZSTD_isError(frameHeaderSize)) return frameHeaderSize; + if (remainingSize < frameHeaderSize+ZSTD_blockHeaderSize) + return ERROR(srcSize_wrong); + CHECK_F( ZSTD_decodeFrameHeader(dctx, ip, frameHeaderSize) ); + ip += frameHeaderSize; remainingSize -= frameHeaderSize; + } + + /* Loop on each block */ + while (1) { + size_t decodedSize; + blockProperties_t blockProperties; + size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties); + if (ZSTD_isError(cBlockSize)) return cBlockSize; + + ip += ZSTD_blockHeaderSize; + remainingSize -= ZSTD_blockHeaderSize; + if (cBlockSize > remainingSize) return ERROR(srcSize_wrong); + + switch(blockProperties.blockType) + { + case bt_compressed: + decodedSize = ZSTD_decompressBlock_internal(dctx, op, oend-op, ip, cBlockSize, /* frame */ 1); + break; + case bt_raw : + decodedSize = ZSTD_copyRawBlock(op, oend-op, ip, cBlockSize); + break; + case bt_rle : + decodedSize = ZSTD_generateNxBytes(op, oend-op, *ip, blockProperties.origSize); + break; + case bt_reserved : + default: + return ERROR(corruption_detected); + } + + if (ZSTD_isError(decodedSize)) return decodedSize; + if (dctx->fParams.checksumFlag) + 
XXH64_update(&dctx->xxhState, op, decodedSize); + op += decodedSize; + ip += cBlockSize; + remainingSize -= cBlockSize; + if (blockProperties.lastBlock) break; + } + + if (dctx->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN) { + if ((U64)(op-ostart) != dctx->fParams.frameContentSize) { + return ERROR(corruption_detected); + } } + if (dctx->fParams.checksumFlag) { /* Frame content checksum verification */ + U32 const checkCalc = (U32)XXH64_digest(&dctx->xxhState); + U32 checkRead; + if (remainingSize<4) return ERROR(checksum_wrong); + checkRead = MEM_readLE32(ip); + if (checkRead != checkCalc) return ERROR(checksum_wrong); + ip += 4; + remainingSize -= 4; + } + + /* Allow caller to get size read */ + *srcPtr = ip; + *srcSizePtr = remainingSize; + return op-ostart; +} + +static const void* ZSTD_DDictDictContent(const ZSTD_DDict* ddict); +static size_t ZSTD_DDictDictSize(const ZSTD_DDict* ddict); + +static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + const void* dict, size_t dictSize, + const ZSTD_DDict* ddict) +{ + void* const dststart = dst; + assert(dict==NULL || ddict==NULL); /* either dict or ddict set, not both */ + + if (ddict) { + dict = ZSTD_DDictDictContent(ddict); + dictSize = ZSTD_DDictDictSize(ddict); + } + + while (srcSize >= ZSTD_frameHeaderSize_prefix) { + U32 magicNumber; + +#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1) + if (ZSTD_isLegacy(src, srcSize)) { + size_t decodedSize; + size_t const frameSize = ZSTD_findFrameCompressedSizeLegacy(src, srcSize); + if (ZSTD_isError(frameSize)) return frameSize; + /* legacy support is not compatible with static dctx */ + if (dctx->staticSize) return ERROR(memory_allocation); + + decodedSize = ZSTD_decompressLegacy(dst, dstCapacity, src, frameSize, dict, dictSize); + + dst = (BYTE*)dst + decodedSize; + dstCapacity -= decodedSize; + + src = (const BYTE*)src + frameSize; + srcSize -= frameSize; + + continue; + } 
+#endif + + magicNumber = MEM_readLE32(src); + DEBUGLOG(4, "reading magic number %08X (expecting %08X)", + (U32)magicNumber, (U32)ZSTD_MAGICNUMBER); + if (magicNumber != ZSTD_MAGICNUMBER) { + if ((magicNumber & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) { + size_t skippableSize; + if (srcSize < ZSTD_skippableHeaderSize) + return ERROR(srcSize_wrong); + skippableSize = MEM_readLE32((const BYTE*)src + ZSTD_frameIdSize) + + ZSTD_skippableHeaderSize; + if (srcSize < skippableSize) return ERROR(srcSize_wrong); + + src = (const BYTE *)src + skippableSize; + srcSize -= skippableSize; + continue; + } + return ERROR(prefix_unknown); + } + + if (ddict) { + /* we were called from ZSTD_decompress_usingDDict */ + CHECK_F(ZSTD_decompressBegin_usingDDict(dctx, ddict)); + } else { + /* this will initialize correctly with no dict if dict == NULL, so + * use this in all cases but ddict */ + CHECK_F(ZSTD_decompressBegin_usingDict(dctx, dict, dictSize)); + } + ZSTD_checkContinuity(dctx, dst); + + { const size_t res = ZSTD_decompressFrame(dctx, dst, dstCapacity, + &src, &srcSize); + if (ZSTD_isError(res)) return res; + /* no need to bound check, ZSTD_decompressFrame already has */ + dst = (BYTE*)dst + res; + dstCapacity -= res; + } + } /* while (srcSize >= ZSTD_frameHeaderSize_prefix) */ + + if (srcSize) return ERROR(srcSize_wrong); /* input not entirely consumed */ + + return (BYTE*)dst - (BYTE*)dststart; +} + +size_t ZSTD_decompress_usingDict(ZSTD_DCtx* dctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + const void* dict, size_t dictSize) +{ + return ZSTD_decompressMultiFrame(dctx, dst, dstCapacity, src, srcSize, dict, dictSize, NULL); +} + + +size_t ZSTD_decompressDCtx(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize) +{ + return ZSTD_decompress_usingDict(dctx, dst, dstCapacity, src, srcSize, NULL, 0); +} + + +size_t ZSTD_decompress(void* dst, size_t dstCapacity, const void* src, size_t srcSize) +{ +#if defined(ZSTD_HEAPMODE) 
&& (ZSTD_HEAPMODE>=1) + size_t regenSize; + ZSTD_DCtx* const dctx = ZSTD_createDCtx(); + if (dctx==NULL) return ERROR(memory_allocation); + regenSize = ZSTD_decompressDCtx(dctx, dst, dstCapacity, src, srcSize); + ZSTD_freeDCtx(dctx); + return regenSize; +#else /* stack mode */ + ZSTD_DCtx dctx; + return ZSTD_decompressDCtx(&dctx, dst, dstCapacity, src, srcSize); +#endif +} + + +/*-************************************** +* Advanced Streaming Decompression API +* Bufferless and synchronous +****************************************/ +size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx) { return dctx->expected; } + +ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx) { + switch(dctx->stage) + { + default: /* should not happen */ + assert(0); + case ZSTDds_getFrameHeaderSize: + case ZSTDds_decodeFrameHeader: + return ZSTDnit_frameHeader; + case ZSTDds_decodeBlockHeader: + return ZSTDnit_blockHeader; + case ZSTDds_decompressBlock: + return ZSTDnit_block; + case ZSTDds_decompressLastBlock: + return ZSTDnit_lastBlock; + case ZSTDds_checkChecksum: + return ZSTDnit_checksum; + case ZSTDds_decodeSkippableHeader: + case ZSTDds_skipFrame: + return ZSTDnit_skippableFrame; + } +} + +static int ZSTD_isSkipFrame(ZSTD_DCtx* dctx) { return dctx->stage == ZSTDds_skipFrame; } + +/** ZSTD_decompressContinue() : + * srcSize : must be the exact nb of bytes expected (see ZSTD_nextSrcSizeToDecompress()) + * @return : nb of bytes generated into `dst` (necessarily <= `dstCapacity) + * or an error code, which can be tested using ZSTD_isError() */ +size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize) +{ + DEBUGLOG(5, "ZSTD_decompressContinue (srcSize:%u)", (U32)srcSize); + /* Sanity check */ + if (srcSize != dctx->expected) return ERROR(srcSize_wrong); /* not allowed */ + if (dstCapacity) ZSTD_checkContinuity(dctx, dst); + + switch (dctx->stage) + { + case ZSTDds_getFrameHeaderSize : + assert(src != NULL); + if (dctx->format == 
ZSTD_f_zstd1) { /* allows header */ + assert(srcSize >= ZSTD_frameIdSize); /* to read skippable magic number */ + if ((MEM_readLE32(src) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) { /* skippable frame */ + memcpy(dctx->headerBuffer, src, srcSize); + dctx->expected = ZSTD_skippableHeaderSize - srcSize; /* remaining to load to get full skippable frame header */ + dctx->stage = ZSTDds_decodeSkippableHeader; + return 0; + } } + dctx->headerSize = ZSTD_frameHeaderSize_internal(src, srcSize, dctx->format); + if (ZSTD_isError(dctx->headerSize)) return dctx->headerSize; + memcpy(dctx->headerBuffer, src, srcSize); + dctx->expected = dctx->headerSize - srcSize; + dctx->stage = ZSTDds_decodeFrameHeader; + return 0; + + case ZSTDds_decodeFrameHeader: + assert(src != NULL); + memcpy(dctx->headerBuffer + (dctx->headerSize - srcSize), src, srcSize); + CHECK_F(ZSTD_decodeFrameHeader(dctx, dctx->headerBuffer, dctx->headerSize)); + dctx->expected = ZSTD_blockHeaderSize; + dctx->stage = ZSTDds_decodeBlockHeader; + return 0; + + case ZSTDds_decodeBlockHeader: + { blockProperties_t bp; + size_t const cBlockSize = ZSTD_getcBlockSize(src, ZSTD_blockHeaderSize, &bp); + if (ZSTD_isError(cBlockSize)) return cBlockSize; + dctx->expected = cBlockSize; + dctx->bType = bp.blockType; + dctx->rleSize = bp.origSize; + if (cBlockSize) { + dctx->stage = bp.lastBlock ? 
ZSTDds_decompressLastBlock : ZSTDds_decompressBlock; + return 0; + } + /* empty block */ + if (bp.lastBlock) { + if (dctx->fParams.checksumFlag) { + dctx->expected = 4; + dctx->stage = ZSTDds_checkChecksum; + } else { + dctx->expected = 0; /* end of frame */ + dctx->stage = ZSTDds_getFrameHeaderSize; + } + } else { + dctx->expected = ZSTD_blockHeaderSize; /* jump to next header */ + dctx->stage = ZSTDds_decodeBlockHeader; + } + return 0; + } + + case ZSTDds_decompressLastBlock: + case ZSTDds_decompressBlock: + DEBUGLOG(5, "ZSTD_decompressContinue: case ZSTDds_decompressBlock"); + { size_t rSize; + switch(dctx->bType) + { + case bt_compressed: + DEBUGLOG(5, "ZSTD_decompressContinue: case bt_compressed"); + rSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, /* frame */ 1); + break; + case bt_raw : + rSize = ZSTD_copyRawBlock(dst, dstCapacity, src, srcSize); + break; + case bt_rle : + rSize = ZSTD_setRleBlock(dst, dstCapacity, src, srcSize, dctx->rleSize); + break; + case bt_reserved : /* should never happen */ + default: + return ERROR(corruption_detected); + } + if (ZSTD_isError(rSize)) return rSize; + DEBUGLOG(5, "ZSTD_decompressContinue: decoded size from block : %u", (U32)rSize); + dctx->decodedSize += rSize; + if (dctx->fParams.checksumFlag) XXH64_update(&dctx->xxhState, dst, rSize); + + if (dctx->stage == ZSTDds_decompressLastBlock) { /* end of frame */ + DEBUGLOG(4, "ZSTD_decompressContinue: decoded size from frame : %u", (U32)dctx->decodedSize); + if (dctx->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN) { + if (dctx->decodedSize != dctx->fParams.frameContentSize) { + return ERROR(corruption_detected); + } } + if (dctx->fParams.checksumFlag) { /* another round for frame checksum */ + dctx->expected = 4; + dctx->stage = ZSTDds_checkChecksum; + } else { + dctx->expected = 0; /* ends here */ + dctx->stage = ZSTDds_getFrameHeaderSize; + } + } else { + dctx->stage = ZSTDds_decodeBlockHeader; + dctx->expected = ZSTD_blockHeaderSize; + 
dctx->previousDstEnd = (char*)dst + rSize; + } + return rSize; + } + + case ZSTDds_checkChecksum: + assert(srcSize == 4); /* guaranteed by dctx->expected */ + { U32 const h32 = (U32)XXH64_digest(&dctx->xxhState); + U32 const check32 = MEM_readLE32(src); + DEBUGLOG(4, "ZSTD_decompressContinue: checksum : calculated %08X :: %08X read", h32, check32); + if (check32 != h32) return ERROR(checksum_wrong); + dctx->expected = 0; + dctx->stage = ZSTDds_getFrameHeaderSize; + return 0; + } + + case ZSTDds_decodeSkippableHeader: + assert(src != NULL); + assert(srcSize <= ZSTD_skippableHeaderSize); + memcpy(dctx->headerBuffer + (ZSTD_skippableHeaderSize - srcSize), src, srcSize); /* complete skippable header */ + dctx->expected = MEM_readLE32(dctx->headerBuffer + ZSTD_frameIdSize); /* note : dctx->expected can grow seriously large, beyond local buffer size */ + dctx->stage = ZSTDds_skipFrame; + return 0; + + case ZSTDds_skipFrame: + dctx->expected = 0; + dctx->stage = ZSTDds_getFrameHeaderSize; + return 0; + + default: + return ERROR(GENERIC); /* impossible */ + } +} + + +static size_t ZSTD_refDictContent(ZSTD_DCtx* dctx, const void* dict, size_t dictSize) +{ + dctx->dictEnd = dctx->previousDstEnd; + dctx->vBase = (const char*)dict - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->base)); + dctx->base = dict; + dctx->previousDstEnd = (const char*)dict + dictSize; + return 0; +} + +/* ZSTD_loadEntropy() : + * dict : must point at beginning of a valid zstd dictionary + * @return : size of entropy tables read */ +static size_t ZSTD_loadEntropy(ZSTD_entropyDTables_t* entropy, const void* const dict, size_t const dictSize) +{ + const BYTE* dictPtr = (const BYTE*)dict; + const BYTE* const dictEnd = dictPtr + dictSize; + + if (dictSize <= 8) return ERROR(dictionary_corrupted); + dictPtr += 8; /* skip header = magic + dictID */ + + + { size_t const hSize = HUF_readDTableX4_wksp( + entropy->hufTable, dictPtr, dictEnd - dictPtr, + entropy->workspace, 
sizeof(entropy->workspace)); + if (HUF_isError(hSize)) return ERROR(dictionary_corrupted); + dictPtr += hSize; + } + + { short offcodeNCount[MaxOff+1]; + U32 offcodeMaxValue = MaxOff, offcodeLog; + size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr); + if (FSE_isError(offcodeHeaderSize)) return ERROR(dictionary_corrupted); + if (offcodeMaxValue > MaxOff) return ERROR(dictionary_corrupted); + if (offcodeLog > OffFSELog) return ERROR(dictionary_corrupted); + ZSTD_buildFSETable(entropy->OFTable, + offcodeNCount, offcodeMaxValue, + OF_base, OF_bits, + offcodeLog); + dictPtr += offcodeHeaderSize; + } + + { short matchlengthNCount[MaxML+1]; + unsigned matchlengthMaxValue = MaxML, matchlengthLog; + size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd-dictPtr); + if (FSE_isError(matchlengthHeaderSize)) return ERROR(dictionary_corrupted); + if (matchlengthMaxValue > MaxML) return ERROR(dictionary_corrupted); + if (matchlengthLog > MLFSELog) return ERROR(dictionary_corrupted); + ZSTD_buildFSETable(entropy->MLTable, + matchlengthNCount, matchlengthMaxValue, + ML_base, ML_bits, + matchlengthLog); + dictPtr += matchlengthHeaderSize; + } + + { short litlengthNCount[MaxLL+1]; + unsigned litlengthMaxValue = MaxLL, litlengthLog; + size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr); + if (FSE_isError(litlengthHeaderSize)) return ERROR(dictionary_corrupted); + if (litlengthMaxValue > MaxLL) return ERROR(dictionary_corrupted); + if (litlengthLog > LLFSELog) return ERROR(dictionary_corrupted); + ZSTD_buildFSETable(entropy->LLTable, + litlengthNCount, litlengthMaxValue, + LL_base, LL_bits, + litlengthLog); + dictPtr += litlengthHeaderSize; + } + + if (dictPtr+12 > dictEnd) return ERROR(dictionary_corrupted); + { int i; + size_t const dictContentSize = (size_t)(dictEnd - 
(dictPtr+12)); + for (i=0; i<3; i++) { + U32 const rep = MEM_readLE32(dictPtr); dictPtr += 4; + if (rep==0 || rep >= dictContentSize) return ERROR(dictionary_corrupted); + entropy->rep[i] = rep; + } } + + return dictPtr - (const BYTE*)dict; +} + +static size_t ZSTD_decompress_insertDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSize) +{ + if (dictSize < 8) return ZSTD_refDictContent(dctx, dict, dictSize); + { U32 const magic = MEM_readLE32(dict); + if (magic != ZSTD_MAGIC_DICTIONARY) { + return ZSTD_refDictContent(dctx, dict, dictSize); /* pure content mode */ + } } + dctx->dictID = MEM_readLE32((const char*)dict + ZSTD_frameIdSize); + + /* load entropy tables */ + { size_t const eSize = ZSTD_loadEntropy(&dctx->entropy, dict, dictSize); + if (ZSTD_isError(eSize)) return ERROR(dictionary_corrupted); + dict = (const char*)dict + eSize; + dictSize -= eSize; + } + dctx->litEntropy = dctx->fseEntropy = 1; + + /* reference dictionary content */ + return ZSTD_refDictContent(dctx, dict, dictSize); +} + +/* Note : this function cannot fail */ +size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx) +{ + assert(dctx != NULL); + dctx->expected = ZSTD_startingInputLength(dctx->format); /* dctx->format must be properly set */ + dctx->stage = ZSTDds_getFrameHeaderSize; + dctx->decodedSize = 0; + dctx->previousDstEnd = NULL; + dctx->base = NULL; + dctx->vBase = NULL; + dctx->dictEnd = NULL; + dctx->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001); /* cover both little and big endian */ + dctx->litEntropy = dctx->fseEntropy = 0; + dctx->dictID = 0; + ZSTD_STATIC_ASSERT(sizeof(dctx->entropy.rep) == sizeof(repStartValue)); + memcpy(dctx->entropy.rep, repStartValue, sizeof(repStartValue)); /* initial repcodes */ + dctx->LLTptr = dctx->entropy.LLTable; + dctx->MLTptr = dctx->entropy.MLTable; + dctx->OFTptr = dctx->entropy.OFTable; + dctx->HUFptr = dctx->entropy.hufTable; + return 0; +} + +size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx* dctx, const void* dict, size_t dictSize) +{ 
+ CHECK_F( ZSTD_decompressBegin(dctx) ); + if (dict && dictSize) + CHECK_E(ZSTD_decompress_insertDictionary(dctx, dict, dictSize), dictionary_corrupted); + return 0; +} + + +/* ====== ZSTD_DDict ====== */ + +struct ZSTD_DDict_s { + void* dictBuffer; + const void* dictContent; + size_t dictSize; + ZSTD_entropyDTables_t entropy; + U32 dictID; + U32 entropyPresent; + ZSTD_customMem cMem; +}; /* typedef'd to ZSTD_DDict within "zstd.h" */ + +static const void* ZSTD_DDictDictContent(const ZSTD_DDict* ddict) +{ + return ddict->dictContent; +} + +static size_t ZSTD_DDictDictSize(const ZSTD_DDict* ddict) +{ + return ddict->dictSize; +} + +size_t ZSTD_decompressBegin_usingDDict(ZSTD_DCtx* dstDCtx, const ZSTD_DDict* ddict) +{ + CHECK_F( ZSTD_decompressBegin(dstDCtx) ); + if (ddict) { /* support begin on NULL */ + dstDCtx->dictID = ddict->dictID; + dstDCtx->base = ddict->dictContent; + dstDCtx->vBase = ddict->dictContent; + dstDCtx->dictEnd = (const BYTE*)ddict->dictContent + ddict->dictSize; + dstDCtx->previousDstEnd = dstDCtx->dictEnd; + if (ddict->entropyPresent) { + dstDCtx->litEntropy = 1; + dstDCtx->fseEntropy = 1; + dstDCtx->LLTptr = ddict->entropy.LLTable; + dstDCtx->MLTptr = ddict->entropy.MLTable; + dstDCtx->OFTptr = ddict->entropy.OFTable; + dstDCtx->HUFptr = ddict->entropy.hufTable; + dstDCtx->entropy.rep[0] = ddict->entropy.rep[0]; + dstDCtx->entropy.rep[1] = ddict->entropy.rep[1]; + dstDCtx->entropy.rep[2] = ddict->entropy.rep[2]; + } else { + dstDCtx->litEntropy = 0; + dstDCtx->fseEntropy = 0; + } + } + return 0; +} + +static size_t ZSTD_loadEntropy_inDDict(ZSTD_DDict* ddict, ZSTD_dictContentType_e dictContentType) +{ + ddict->dictID = 0; + ddict->entropyPresent = 0; + if (dictContentType == ZSTD_dct_rawContent) return 0; + + if (ddict->dictSize < 8) { + if (dictContentType == ZSTD_dct_fullDict) + return ERROR(dictionary_corrupted); /* only accept specified dictionaries */ + return 0; /* pure content mode */ + } + { U32 const magic = 
MEM_readLE32(ddict->dictContent); + if (magic != ZSTD_MAGIC_DICTIONARY) { + if (dictContentType == ZSTD_dct_fullDict) + return ERROR(dictionary_corrupted); /* only accept specified dictionaries */ + return 0; /* pure content mode */ + } + } + ddict->dictID = MEM_readLE32((const char*)ddict->dictContent + ZSTD_frameIdSize); + + /* load entropy tables */ + CHECK_E( ZSTD_loadEntropy(&ddict->entropy, ddict->dictContent, ddict->dictSize), dictionary_corrupted ); + ddict->entropyPresent = 1; + return 0; +} + + +static size_t ZSTD_initDDict_internal(ZSTD_DDict* ddict, + const void* dict, size_t dictSize, + ZSTD_dictLoadMethod_e dictLoadMethod, + ZSTD_dictContentType_e dictContentType) +{ + if ((dictLoadMethod == ZSTD_dlm_byRef) || (!dict) || (!dictSize)) { + ddict->dictBuffer = NULL; + ddict->dictContent = dict; + } else { + void* const internalBuffer = ZSTD_malloc(dictSize, ddict->cMem); + ddict->dictBuffer = internalBuffer; + ddict->dictContent = internalBuffer; + if (!internalBuffer) return ERROR(memory_allocation); + memcpy(internalBuffer, dict, dictSize); + } + ddict->dictSize = dictSize; + ddict->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001); /* cover both little and big endian */ + + /* parse dictionary content */ + CHECK_F( ZSTD_loadEntropy_inDDict(ddict, dictContentType) ); + + return 0; +} + +ZSTD_DDict* ZSTD_createDDict_advanced(const void* dict, size_t dictSize, + ZSTD_dictLoadMethod_e dictLoadMethod, + ZSTD_dictContentType_e dictContentType, + ZSTD_customMem customMem) +{ + if (!customMem.customAlloc ^ !customMem.customFree) return NULL; + + { ZSTD_DDict* const ddict = (ZSTD_DDict*) ZSTD_malloc(sizeof(ZSTD_DDict), customMem); + if (!ddict) return NULL; + ddict->cMem = customMem; + + if (ZSTD_isError( ZSTD_initDDict_internal(ddict, dict, dictSize, dictLoadMethod, dictContentType) )) { + ZSTD_freeDDict(ddict); + return NULL; + } + + return ddict; + } +} + +/*! 
ZSTD_createDDict() : +* Create a digested dictionary, to start decompression without startup delay. +* `dict` content is copied inside DDict. +* Consequently, `dict` can be released after `ZSTD_DDict` creation */ +ZSTD_DDict* ZSTD_createDDict(const void* dict, size_t dictSize) +{ + ZSTD_customMem const allocator = { NULL, NULL, NULL }; + return ZSTD_createDDict_advanced(dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto, allocator); +} + +/*! ZSTD_createDDict_byReference() : + * Create a digested dictionary, to start decompression without startup delay. + * Dictionary content is simply referenced, it will be accessed during decompression. + * Warning : dictBuffer must outlive DDict (DDict must be freed before dictBuffer) */ +ZSTD_DDict* ZSTD_createDDict_byReference(const void* dictBuffer, size_t dictSize) +{ + ZSTD_customMem const allocator = { NULL, NULL, NULL }; + return ZSTD_createDDict_advanced(dictBuffer, dictSize, ZSTD_dlm_byRef, ZSTD_dct_auto, allocator); +} + + +const ZSTD_DDict* ZSTD_initStaticDDict( + void* workspace, size_t workspaceSize, + const void* dict, size_t dictSize, + ZSTD_dictLoadMethod_e dictLoadMethod, + ZSTD_dictContentType_e dictContentType) +{ + size_t const neededSpace = + sizeof(ZSTD_DDict) + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize); + ZSTD_DDict* const ddict = (ZSTD_DDict*)workspace; + assert(workspace != NULL); + assert(dict != NULL); + if ((size_t)workspace & 7) return NULL; /* 8-aligned */ + if (workspaceSize < neededSpace) return NULL; + if (dictLoadMethod == ZSTD_dlm_byCopy) { + memcpy(ddict+1, dict, dictSize); /* local copy */ + dict = ddict+1; + } + if (ZSTD_isError( ZSTD_initDDict_internal(ddict, dict, dictSize, ZSTD_dlm_byRef, dictContentType) )) + return NULL; + return ddict; +} + + +size_t ZSTD_freeDDict(ZSTD_DDict* ddict) +{ + if (ddict==NULL) return 0; /* support free on NULL */ + { ZSTD_customMem const cMem = ddict->cMem; + ZSTD_free(ddict->dictBuffer, cMem); + ZSTD_free(ddict, cMem); + return 0; + } +} + +/*! 
ZSTD_estimateDDictSize() : + * Estimate amount of memory that will be needed to create a dictionary for decompression. + * Note : dictionary created by reference using ZSTD_dlm_byRef are smaller */ +size_t ZSTD_estimateDDictSize(size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod) +{ + return sizeof(ZSTD_DDict) + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize); +} + +size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict) +{ + if (ddict==NULL) return 0; /* support sizeof on NULL */ + return sizeof(*ddict) + (ddict->dictBuffer ? ddict->dictSize : 0) ; +} + +/*! ZSTD_getDictID_fromDict() : + * Provides the dictID stored within dictionary. + * if @return == 0, the dictionary is not conformant with Zstandard specification. + * It can still be loaded, but as a content-only dictionary. */ +unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize) +{ + if (dictSize < 8) return 0; + if (MEM_readLE32(dict) != ZSTD_MAGIC_DICTIONARY) return 0; + return MEM_readLE32((const char*)dict + ZSTD_frameIdSize); +} + +/*! ZSTD_getDictID_fromDDict() : + * Provides the dictID of the dictionary loaded into `ddict`. + * If @return == 0, the dictionary is not conformant to Zstandard specification, or empty. + * Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */ +unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict) +{ + if (ddict==NULL) return 0; + return ZSTD_getDictID_fromDict(ddict->dictContent, ddict->dictSize); +} + +/*! ZSTD_getDictID_fromFrame() : + * Provides the dictID required to decompresse frame stored within `src`. + * If @return == 0, the dictID could not be decoded. + * This could for one of the following reasons : + * - The frame does not require a dictionary (most common case). + * - The frame was built with dictID intentionally removed. + * Needed dictionary is a hidden information. + * Note : this use case also happens when using a non-conformant dictionary. 
+ * - `srcSize` is too small, and as a result, frame header could not be decoded. + * Note : possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`. + * - This is not a Zstandard frame. + * When identifying the exact failure cause, it's possible to use + * ZSTD_getFrameHeader(), which will provide a more precise error code. */ +unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize) +{ + ZSTD_frameHeader zfp = { 0, 0, 0, ZSTD_frame, 0, 0, 0 }; + size_t const hError = ZSTD_getFrameHeader(&zfp, src, srcSize); + if (ZSTD_isError(hError)) return 0; + return zfp.dictID; +} + + +/*! ZSTD_decompress_usingDDict() : +* Decompression using a pre-digested Dictionary +* Use dictionary without significant overhead. */ +size_t ZSTD_decompress_usingDDict(ZSTD_DCtx* dctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + const ZSTD_DDict* ddict) +{ + /* pass content and size in case legacy frames are encountered */ + return ZSTD_decompressMultiFrame(dctx, dst, dstCapacity, src, srcSize, + NULL, 0, + ddict); +} + + +/*===================================== +* Streaming decompression +*====================================*/ + +ZSTD_DStream* ZSTD_createDStream(void) +{ + DEBUGLOG(3, "ZSTD_createDStream"); + return ZSTD_createDStream_advanced(ZSTD_defaultCMem); +} + +ZSTD_DStream* ZSTD_initStaticDStream(void *workspace, size_t workspaceSize) +{ + return ZSTD_initStaticDCtx(workspace, workspaceSize); +} + +ZSTD_DStream* ZSTD_createDStream_advanced(ZSTD_customMem customMem) +{ + return ZSTD_createDCtx_advanced(customMem); +} + +size_t ZSTD_freeDStream(ZSTD_DStream* zds) +{ + return ZSTD_freeDCtx(zds); +} + + +/* *** Initialization *** */ + +size_t ZSTD_DStreamInSize(void) { return ZSTD_BLOCKSIZE_MAX + ZSTD_blockHeaderSize; } +size_t ZSTD_DStreamOutSize(void) { return ZSTD_BLOCKSIZE_MAX; } + +size_t ZSTD_DCtx_loadDictionary_advanced(ZSTD_DCtx* dctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType) +{ 
+ if (dctx->streamStage != zdss_init) return ERROR(stage_wrong); + ZSTD_freeDDict(dctx->ddictLocal); + if (dict && dictSize >= 8) { + dctx->ddictLocal = ZSTD_createDDict_advanced(dict, dictSize, dictLoadMethod, dictContentType, dctx->customMem); + if (dctx->ddictLocal == NULL) return ERROR(memory_allocation); + } else { + dctx->ddictLocal = NULL; + } + dctx->ddict = dctx->ddictLocal; + return 0; +} + +size_t ZSTD_DCtx_loadDictionary_byReference(ZSTD_DCtx* dctx, const void* dict, size_t dictSize) +{ + return ZSTD_DCtx_loadDictionary_advanced(dctx, dict, dictSize, ZSTD_dlm_byRef, ZSTD_dct_auto); +} + +size_t ZSTD_DCtx_loadDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSize) +{ + return ZSTD_DCtx_loadDictionary_advanced(dctx, dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto); +} + +size_t ZSTD_DCtx_refPrefix_advanced(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType) +{ + return ZSTD_DCtx_loadDictionary_advanced(dctx, prefix, prefixSize, ZSTD_dlm_byRef, dictContentType); +} + +size_t ZSTD_DCtx_refPrefix(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize) +{ + return ZSTD_DCtx_refPrefix_advanced(dctx, prefix, prefixSize, ZSTD_dct_rawContent); +} + + +/* ZSTD_initDStream_usingDict() : + * return : expected size, aka ZSTD_frameHeaderSize_prefix. 
+ * this function cannot fail */ +size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize) +{ + DEBUGLOG(4, "ZSTD_initDStream_usingDict"); + zds->streamStage = zdss_init; + CHECK_F( ZSTD_DCtx_loadDictionary(zds, dict, dictSize) ); + return ZSTD_frameHeaderSize_prefix; +} + +/* note : this variant can't fail */ +size_t ZSTD_initDStream(ZSTD_DStream* zds) +{ + DEBUGLOG(4, "ZSTD_initDStream"); + return ZSTD_initDStream_usingDict(zds, NULL, 0); +} + +size_t ZSTD_DCtx_refDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict) +{ + if (dctx->streamStage != zdss_init) return ERROR(stage_wrong); + dctx->ddict = ddict; + return 0; +} + +/* ZSTD_initDStream_usingDDict() : + * ddict will just be referenced, and must outlive decompression session + * this function cannot fail */ +size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* dctx, const ZSTD_DDict* ddict) +{ + size_t const initResult = ZSTD_initDStream(dctx); + dctx->ddict = ddict; + return initResult; +} + +/* ZSTD_resetDStream() : + * return : expected size, aka ZSTD_frameHeaderSize_prefix. + * this function cannot fail */ +size_t ZSTD_resetDStream(ZSTD_DStream* dctx) +{ + DEBUGLOG(4, "ZSTD_resetDStream"); + dctx->streamStage = zdss_loadHeader; + dctx->lhSize = dctx->inPos = dctx->outStart = dctx->outEnd = 0; + dctx->legacyVersion = 0; + dctx->hostageByte = 0; + return ZSTD_frameHeaderSize_prefix; +} + +size_t ZSTD_setDStreamParameter(ZSTD_DStream* dctx, + ZSTD_DStreamParameter_e paramType, unsigned paramValue) +{ + if (dctx->streamStage != zdss_init) return ERROR(stage_wrong); + switch(paramType) + { + default : return ERROR(parameter_unsupported); + case DStream_p_maxWindowSize : + DEBUGLOG(4, "setting maxWindowSize = %u KB", paramValue >> 10); + dctx->maxWindowSize = paramValue ? 
paramValue : (U32)(-1); + break; + } + return 0; +} + +size_t ZSTD_DCtx_setMaxWindowSize(ZSTD_DCtx* dctx, size_t maxWindowSize) +{ + if (dctx->streamStage != zdss_init) return ERROR(stage_wrong); + dctx->maxWindowSize = maxWindowSize; + return 0; +} + +size_t ZSTD_DCtx_setFormat(ZSTD_DCtx* dctx, ZSTD_format_e format) +{ + DEBUGLOG(4, "ZSTD_DCtx_setFormat : %u", (unsigned)format); + if (dctx->streamStage != zdss_init) return ERROR(stage_wrong); + dctx->format = format; + return 0; +} + + +size_t ZSTD_sizeof_DStream(const ZSTD_DStream* dctx) +{ + return ZSTD_sizeof_DCtx(dctx); +} + +size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long long frameContentSize) +{ + size_t const blockSize = (size_t) MIN(windowSize, ZSTD_BLOCKSIZE_MAX); + unsigned long long const neededRBSize = windowSize + blockSize + (WILDCOPY_OVERLENGTH * 2); + unsigned long long const neededSize = MIN(frameContentSize, neededRBSize); + size_t const minRBSize = (size_t) neededSize; + if ((unsigned long long)minRBSize != neededSize) return ERROR(frameParameter_windowTooLarge); + return minRBSize; +} + +size_t ZSTD_estimateDStreamSize(size_t windowSize) +{ + size_t const blockSize = MIN(windowSize, ZSTD_BLOCKSIZE_MAX); + size_t const inBuffSize = blockSize; /* no block can be larger */ + size_t const outBuffSize = ZSTD_decodingBufferSize_min(windowSize, ZSTD_CONTENTSIZE_UNKNOWN); + return ZSTD_estimateDCtxSize() + inBuffSize + outBuffSize; +} + +size_t ZSTD_estimateDStreamSize_fromFrame(const void* src, size_t srcSize) +{ + U32 const windowSizeMax = 1U << ZSTD_WINDOWLOG_MAX; /* note : should be user-selectable */ + ZSTD_frameHeader zfh; + size_t const err = ZSTD_getFrameHeader(&zfh, src, srcSize); + if (ZSTD_isError(err)) return err; + if (err>0) return ERROR(srcSize_wrong); + if (zfh.windowSize > windowSizeMax) + return ERROR(frameParameter_windowTooLarge); + return ZSTD_estimateDStreamSize((size_t)zfh.windowSize); +} + + +/* ***** Decompression ***** */ + +MEM_STATIC size_t 
ZSTD_limitCopy(void* dst, size_t dstCapacity, const void* src, size_t srcSize) +{ + size_t const length = MIN(dstCapacity, srcSize); + memcpy(dst, src, length); + return length; +} + + +size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inBuffer* input) +{ + const char* const istart = (const char*)(input->src) + input->pos; + const char* const iend = (const char*)(input->src) + input->size; + const char* ip = istart; + char* const ostart = (char*)(output->dst) + output->pos; + char* const oend = (char*)(output->dst) + output->size; + char* op = ostart; + U32 someMoreWork = 1; + + DEBUGLOG(5, "ZSTD_decompressStream"); + if (input->pos > input->size) { /* forbidden */ + DEBUGLOG(5, "in: pos: %u vs size: %u", + (U32)input->pos, (U32)input->size); + return ERROR(srcSize_wrong); + } + if (output->pos > output->size) { /* forbidden */ + DEBUGLOG(5, "out: pos: %u vs size: %u", + (U32)output->pos, (U32)output->size); + return ERROR(dstSize_tooSmall); + } + DEBUGLOG(5, "input size : %u", (U32)(input->size - input->pos)); + + while (someMoreWork) { + switch(zds->streamStage) + { + case zdss_init : + DEBUGLOG(5, "stage zdss_init => transparent reset "); + ZSTD_resetDStream(zds); /* transparent reset on starting decoding a new frame */ + /* fall-through */ + + case zdss_loadHeader : + DEBUGLOG(5, "stage zdss_loadHeader (srcSize : %u)", (U32)(iend - ip)); +#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1) + if (zds->legacyVersion) { + /* legacy support is incompatible with static dctx */ + if (zds->staticSize) return ERROR(memory_allocation); + { size_t const hint = ZSTD_decompressLegacyStream(zds->legacyContext, zds->legacyVersion, output, input); + if (hint==0) zds->streamStage = zdss_init; + return hint; + } } +#endif + { size_t const hSize = ZSTD_getFrameHeader_internal(&zds->fParams, zds->headerBuffer, zds->lhSize, zds->format); + DEBUGLOG(5, "header size : %u", (U32)hSize); + if (ZSTD_isError(hSize)) { +#if defined(ZSTD_LEGACY_SUPPORT) 
&& (ZSTD_LEGACY_SUPPORT>=1) + U32 const legacyVersion = ZSTD_isLegacy(istart, iend-istart); + if (legacyVersion) { + const void* const dict = zds->ddict ? zds->ddict->dictContent : NULL; + size_t const dictSize = zds->ddict ? zds->ddict->dictSize : 0; + DEBUGLOG(5, "ZSTD_decompressStream: detected legacy version v0.%u", legacyVersion); + /* legacy support is incompatible with static dctx */ + if (zds->staticSize) return ERROR(memory_allocation); + CHECK_F(ZSTD_initLegacyStream(&zds->legacyContext, + zds->previousLegacyVersion, legacyVersion, + dict, dictSize)); + zds->legacyVersion = zds->previousLegacyVersion = legacyVersion; + { size_t const hint = ZSTD_decompressLegacyStream(zds->legacyContext, legacyVersion, output, input); + if (hint==0) zds->streamStage = zdss_init; /* or stay in stage zdss_loadHeader */ + return hint; + } } +#endif + return hSize; /* error */ + } + if (hSize != 0) { /* need more input */ + size_t const toLoad = hSize - zds->lhSize; /* if hSize!=0, hSize > zds->lhSize */ + size_t const remainingInput = (size_t)(iend-ip); + assert(iend >= ip); + if (toLoad > remainingInput) { /* not enough input to load full header */ + if (remainingInput > 0) { + memcpy(zds->headerBuffer + zds->lhSize, ip, remainingInput); + zds->lhSize += remainingInput; + } + input->pos = input->size; + return (MAX(ZSTD_frameHeaderSize_min, hSize) - zds->lhSize) + ZSTD_blockHeaderSize; /* remaining header bytes + next block header */ + } + assert(ip != NULL); + memcpy(zds->headerBuffer + zds->lhSize, ip, toLoad); zds->lhSize = hSize; ip += toLoad; + break; + } } + + /* check for single-pass mode opportunity */ + if (zds->fParams.frameContentSize && zds->fParams.windowSize /* skippable frame if == 0 */ + && (U64)(size_t)(oend-op) >= zds->fParams.frameContentSize) { + size_t const cSize = ZSTD_findFrameCompressedSize(istart, iend-istart); + if (cSize <= (size_t)(iend-istart)) { + /* shortcut : using single-pass mode */ + size_t const decompressedSize = 
ZSTD_decompress_usingDDict(zds, op, oend-op, istart, cSize, zds->ddict); + if (ZSTD_isError(decompressedSize)) return decompressedSize; + DEBUGLOG(4, "shortcut to single-pass ZSTD_decompress_usingDDict()") + ip = istart + cSize; + op += decompressedSize; + zds->expected = 0; + zds->streamStage = zdss_init; + someMoreWork = 0; + break; + } } + + /* Consume header (see ZSTDds_decodeFrameHeader) */ + DEBUGLOG(4, "Consume header"); + CHECK_F(ZSTD_decompressBegin_usingDDict(zds, zds->ddict)); + + if ((MEM_readLE32(zds->headerBuffer) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) { /* skippable frame */ + zds->expected = MEM_readLE32(zds->headerBuffer + ZSTD_frameIdSize); + zds->stage = ZSTDds_skipFrame; + } else { + CHECK_F(ZSTD_decodeFrameHeader(zds, zds->headerBuffer, zds->lhSize)); + zds->expected = ZSTD_blockHeaderSize; + zds->stage = ZSTDds_decodeBlockHeader; + } + + /* control buffer memory usage */ + DEBUGLOG(4, "Control max memory usage (%u KB <= max %u KB)", + (U32)(zds->fParams.windowSize >>10), + (U32)(zds->maxWindowSize >> 10) ); + zds->fParams.windowSize = MAX(zds->fParams.windowSize, 1U << ZSTD_WINDOWLOG_ABSOLUTEMIN); + if (zds->fParams.windowSize > zds->maxWindowSize) return ERROR(frameParameter_windowTooLarge); + + /* Adapt buffer sizes to frame header instructions */ + { size_t const neededInBuffSize = MAX(zds->fParams.blockSizeMax, 4 /* frame checksum */); + size_t const neededOutBuffSize = ZSTD_decodingBufferSize_min(zds->fParams.windowSize, zds->fParams.frameContentSize); + if ((zds->inBuffSize < neededInBuffSize) || (zds->outBuffSize < neededOutBuffSize)) { + size_t const bufferSize = neededInBuffSize + neededOutBuffSize; + DEBUGLOG(4, "inBuff : from %u to %u", + (U32)zds->inBuffSize, (U32)neededInBuffSize); + DEBUGLOG(4, "outBuff : from %u to %u", + (U32)zds->outBuffSize, (U32)neededOutBuffSize); + if (zds->staticSize) { /* static DCtx */ + DEBUGLOG(4, "staticSize : %u", (U32)zds->staticSize); + assert(zds->staticSize >= sizeof(ZSTD_DCtx)); /* 
controlled at init */ + if (bufferSize > zds->staticSize - sizeof(ZSTD_DCtx)) + return ERROR(memory_allocation); + } else { + ZSTD_free(zds->inBuff, zds->customMem); + zds->inBuffSize = 0; + zds->outBuffSize = 0; + zds->inBuff = (char*)ZSTD_malloc(bufferSize, zds->customMem); + if (zds->inBuff == NULL) return ERROR(memory_allocation); + } + zds->inBuffSize = neededInBuffSize; + zds->outBuff = zds->inBuff + zds->inBuffSize; + zds->outBuffSize = neededOutBuffSize; + } } + zds->streamStage = zdss_read; + /* fall-through */ + + case zdss_read: + DEBUGLOG(5, "stage zdss_read"); + { size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds); + DEBUGLOG(5, "neededInSize = %u", (U32)neededInSize); + if (neededInSize==0) { /* end of frame */ + zds->streamStage = zdss_init; + someMoreWork = 0; + break; + } + if ((size_t)(iend-ip) >= neededInSize) { /* decode directly from src */ + int const isSkipFrame = ZSTD_isSkipFrame(zds); + size_t const decodedSize = ZSTD_decompressContinue(zds, + zds->outBuff + zds->outStart, (isSkipFrame ? 
0 : zds->outBuffSize - zds->outStart), + ip, neededInSize); + if (ZSTD_isError(decodedSize)) return decodedSize; + ip += neededInSize; + if (!decodedSize && !isSkipFrame) break; /* this was just a header */ + zds->outEnd = zds->outStart + decodedSize; + zds->streamStage = zdss_flush; + break; + } } + if (ip==iend) { someMoreWork = 0; break; } /* no more input */ + zds->streamStage = zdss_load; + /* fall-through */ + + case zdss_load: + { size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds); + size_t const toLoad = neededInSize - zds->inPos; + int const isSkipFrame = ZSTD_isSkipFrame(zds); + size_t loadedSize; + if (isSkipFrame) { + loadedSize = MIN(toLoad, (size_t)(iend-ip)); + } else { + if (toLoad > zds->inBuffSize - zds->inPos) return ERROR(corruption_detected); /* should never happen */ + loadedSize = ZSTD_limitCopy(zds->inBuff + zds->inPos, toLoad, ip, iend-ip); + } + ip += loadedSize; + zds->inPos += loadedSize; + if (loadedSize < toLoad) { someMoreWork = 0; break; } /* not enough input, wait for more */ + + /* decode loaded input */ + { size_t const decodedSize = ZSTD_decompressContinue(zds, + zds->outBuff + zds->outStart, zds->outBuffSize - zds->outStart, + zds->inBuff, neededInSize); + if (ZSTD_isError(decodedSize)) return decodedSize; + zds->inPos = 0; /* input is consumed */ + if (!decodedSize && !isSkipFrame) { zds->streamStage = zdss_read; break; } /* this was just a header */ + zds->outEnd = zds->outStart + decodedSize; + } } + zds->streamStage = zdss_flush; + /* fall-through */ + + case zdss_flush: + { size_t const toFlushSize = zds->outEnd - zds->outStart; + size_t const flushedSize = ZSTD_limitCopy(op, oend-op, zds->outBuff + zds->outStart, toFlushSize); + op += flushedSize; + zds->outStart += flushedSize; + if (flushedSize == toFlushSize) { /* flush completed */ + zds->streamStage = zdss_read; + if ( (zds->outBuffSize < zds->fParams.frameContentSize) + && (zds->outStart + zds->fParams.blockSizeMax > zds->outBuffSize) ) { + DEBUGLOG(5, 
"restart filling outBuff from beginning (left:%i, needed:%u)", + (int)(zds->outBuffSize - zds->outStart), + (U32)zds->fParams.blockSizeMax); + zds->outStart = zds->outEnd = 0; + } + break; + } } + /* cannot complete flush */ + someMoreWork = 0; + break; + + default: return ERROR(GENERIC); /* impossible */ + } } + + /* result */ + input->pos += (size_t)(ip-istart); + output->pos += (size_t)(op-ostart); + { size_t nextSrcSizeHint = ZSTD_nextSrcSizeToDecompress(zds); + if (!nextSrcSizeHint) { /* frame fully decoded */ + if (zds->outEnd == zds->outStart) { /* output fully flushed */ + if (zds->hostageByte) { + if (input->pos >= input->size) { + /* can't release hostage (not present) */ + zds->streamStage = zdss_read; + return 1; + } + input->pos++; /* release hostage */ + } /* zds->hostageByte */ + return 0; + } /* zds->outEnd == zds->outStart */ + if (!zds->hostageByte) { /* output not fully flushed; keep last byte as hostage; will be released when all output is flushed */ + input->pos--; /* note : pos > 0, otherwise, impossible to finish reading last block */ + zds->hostageByte=1; + } + return 1; + } /* nextSrcSizeHint==0 */ + nextSrcSizeHint += ZSTD_blockHeaderSize * (ZSTD_nextInputType(zds) == ZSTDnit_block); /* preload header of next block */ + assert(zds->inPos <= nextSrcSizeHint); + nextSrcSizeHint -= zds->inPos; /* part already loaded*/ + return nextSrcSizeHint; + } +} + + +size_t ZSTD_decompress_generic(ZSTD_DCtx* dctx, ZSTD_outBuffer* output, ZSTD_inBuffer* input) +{ + return ZSTD_decompressStream(dctx, output, input); +} + +size_t ZSTD_decompress_generic_simpleArgs ( + ZSTD_DCtx* dctx, + void* dst, size_t dstCapacity, size_t* dstPos, + const void* src, size_t srcSize, size_t* srcPos) +{ + ZSTD_outBuffer output = { dst, dstCapacity, *dstPos }; + ZSTD_inBuffer input = { src, srcSize, *srcPos }; + /* ZSTD_compress_generic() will check validity of dstPos and srcPos */ + size_t const cErr = ZSTD_decompress_generic(dctx, &output, &input); + *dstPos = output.pos; + 
*srcPos = input.pos; + return cErr; +} + +void ZSTD_DCtx_reset(ZSTD_DCtx* dctx) +{ + (void)ZSTD_initDStream(dctx); + dctx->format = ZSTD_f_zstd1; + dctx->maxWindowSize = ZSTD_MAXWINDOWSIZE_DEFAULT; +} diff --git a/vendor/github.com/DataDog/zstd/zstd_double_fast.c b/vendor/github.com/DataDog/zstd/zstd_double_fast.c new file mode 100644 index 00000000000..86e6b39621b --- /dev/null +++ b/vendor/github.com/DataDog/zstd/zstd_double_fast.c @@ -0,0 +1,327 @@ +/* + * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + +#include "zstd_compress_internal.h" +#include "zstd_double_fast.h" + + +void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms, + ZSTD_compressionParameters const* cParams, + void const* end) +{ + U32* const hashLarge = ms->hashTable; + U32 const hBitsL = cParams->hashLog; + U32 const mls = cParams->searchLength; + U32* const hashSmall = ms->chainTable; + U32 const hBitsS = cParams->chainLog; + const BYTE* const base = ms->window.base; + const BYTE* ip = base + ms->nextToUpdate; + const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE; + const U32 fastHashFillStep = 3; + + /* Always insert every fastHashFillStep position into the hash tables. + * Insert the other positions into the large hash table if their entry + * is empty. 
+ */ + for (; ip + fastHashFillStep - 1 <= iend; ip += fastHashFillStep) { + U32 const current = (U32)(ip - base); + U32 i; + for (i = 0; i < fastHashFillStep; ++i) { + size_t const smHash = ZSTD_hashPtr(ip + i, hBitsS, mls); + size_t const lgHash = ZSTD_hashPtr(ip + i, hBitsL, 8); + if (i == 0) + hashSmall[smHash] = current + i; + if (i == 0 || hashLarge[lgHash] == 0) + hashLarge[lgHash] = current + i; + } + } +} + + +FORCE_INLINE_TEMPLATE +size_t ZSTD_compressBlock_doubleFast_generic( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize, + U32 const mls /* template */) +{ + U32* const hashLong = ms->hashTable; + const U32 hBitsL = cParams->hashLog; + U32* const hashSmall = ms->chainTable; + const U32 hBitsS = cParams->chainLog; + const BYTE* const base = ms->window.base; + const BYTE* const istart = (const BYTE*)src; + const BYTE* ip = istart; + const BYTE* anchor = istart; + const U32 lowestIndex = ms->window.dictLimit; + const BYTE* const lowest = base + lowestIndex; + const BYTE* const iend = istart + srcSize; + const BYTE* const ilimit = iend - HASH_READ_SIZE; + U32 offset_1=rep[0], offset_2=rep[1]; + U32 offsetSaved = 0; + + /* init */ + ip += (ip==lowest); + { U32 const maxRep = (U32)(ip-lowest); + if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0; + if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0; + } + + /* Main Search Loop */ + while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */ + size_t mLength; + size_t const h2 = ZSTD_hashPtr(ip, hBitsL, 8); + size_t const h = ZSTD_hashPtr(ip, hBitsS, mls); + U32 const current = (U32)(ip-base); + U32 const matchIndexL = hashLong[h2]; + U32 const matchIndexS = hashSmall[h]; + const BYTE* matchLong = base + matchIndexL; + const BYTE* match = base + matchIndexS; + hashLong[h2] = hashSmall[h] = current; /* update hash tables */ + + assert(offset_1 <= current); /* supposed guaranteed 
by construction */ + if ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1))) { + /* favor repcode */ + mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4; + ip++; + ZSTD_storeSeq(seqStore, ip-anchor, anchor, 0, mLength-MINMATCH); + } else { + U32 offset; + if ( (matchIndexL > lowestIndex) && (MEM_read64(matchLong) == MEM_read64(ip)) ) { + mLength = ZSTD_count(ip+8, matchLong+8, iend) + 8; + offset = (U32)(ip-matchLong); + while (((ip>anchor) & (matchLong>lowest)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */ + } else if ( (matchIndexS > lowestIndex) && (MEM_read32(match) == MEM_read32(ip)) ) { + size_t const hl3 = ZSTD_hashPtr(ip+1, hBitsL, 8); + U32 const matchIndexL3 = hashLong[hl3]; + const BYTE* matchL3 = base + matchIndexL3; + hashLong[hl3] = current + 1; + if ( (matchIndexL3 > lowestIndex) && (MEM_read64(matchL3) == MEM_read64(ip+1)) ) { + mLength = ZSTD_count(ip+9, matchL3+8, iend) + 8; + ip++; + offset = (U32)(ip-matchL3); + while (((ip>anchor) & (matchL3>lowest)) && (ip[-1] == matchL3[-1])) { ip--; matchL3--; mLength++; } /* catch up */ + } else { + mLength = ZSTD_count(ip+4, match+4, iend) + 4; + offset = (U32)(ip-match); + while (((ip>anchor) & (match>lowest)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */ + } + } else { + ip += ((ip-anchor) >> kSearchStrength) + 1; + continue; + } + + offset_2 = offset_1; + offset_1 = offset; + + ZSTD_storeSeq(seqStore, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH); + } + + /* match found */ + ip += mLength; + anchor = ip; + + if (ip <= ilimit) { + /* Fill Table */ + hashLong[ZSTD_hashPtr(base+current+2, hBitsL, 8)] = + hashSmall[ZSTD_hashPtr(base+current+2, hBitsS, mls)] = current+2; /* here because current+2 could be > iend-8 */ + hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = + hashSmall[ZSTD_hashPtr(ip-2, hBitsS, mls)] = (U32)(ip-2-base); + + /* check immediate repcode */ + while ( (ip <= ilimit) + && ( (offset_2>0) + & 
(MEM_read32(ip) == MEM_read32(ip - offset_2)) )) { + /* store sequence */ + size_t const rLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4; + { U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; } /* swap offset_2 <=> offset_1 */ + hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip-base); + hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip-base); + ZSTD_storeSeq(seqStore, 0, anchor, 0, rLength-MINMATCH); + ip += rLength; + anchor = ip; + continue; /* faster when present ... (?) */ + } } } + + /* save reps for next block */ + rep[0] = offset_1 ? offset_1 : offsetSaved; + rep[1] = offset_2 ? offset_2 : offsetSaved; + + /* Return the last literals size */ + return iend - anchor; +} + + +size_t ZSTD_compressBlock_doubleFast( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize) +{ + const U32 mls = cParams->searchLength; + switch(mls) + { + default: /* includes case 3 */ + case 4 : + return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, cParams, src, srcSize, 4); + case 5 : + return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, cParams, src, srcSize, 5); + case 6 : + return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, cParams, src, srcSize, 6); + case 7 : + return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, cParams, src, srcSize, 7); + } +} + + +static size_t ZSTD_compressBlock_doubleFast_extDict_generic( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize, + U32 const mls /* template */) +{ + U32* const hashLong = ms->hashTable; + U32 const hBitsL = cParams->hashLog; + U32* const hashSmall = ms->chainTable; + U32 const hBitsS = cParams->chainLog; + const BYTE* const base = ms->window.base; + const BYTE* const dictBase = ms->window.dictBase; + const BYTE* const istart = (const BYTE*)src; + const BYTE* ip = istart; + 
const BYTE* anchor = istart; + const U32 lowestIndex = ms->window.lowLimit; + const BYTE* const dictStart = dictBase + lowestIndex; + const U32 dictLimit = ms->window.dictLimit; + const BYTE* const lowPrefixPtr = base + dictLimit; + const BYTE* const dictEnd = dictBase + dictLimit; + const BYTE* const iend = istart + srcSize; + const BYTE* const ilimit = iend - 8; + U32 offset_1=rep[0], offset_2=rep[1]; + + /* Search Loop */ + while (ip < ilimit) { /* < instead of <=, because (ip+1) */ + const size_t hSmall = ZSTD_hashPtr(ip, hBitsS, mls); + const U32 matchIndex = hashSmall[hSmall]; + const BYTE* matchBase = matchIndex < dictLimit ? dictBase : base; + const BYTE* match = matchBase + matchIndex; + + const size_t hLong = ZSTD_hashPtr(ip, hBitsL, 8); + const U32 matchLongIndex = hashLong[hLong]; + const BYTE* matchLongBase = matchLongIndex < dictLimit ? dictBase : base; + const BYTE* matchLong = matchLongBase + matchLongIndex; + + const U32 current = (U32)(ip-base); + const U32 repIndex = current + 1 - offset_1; /* offset_1 expected <= current +1 */ + const BYTE* repBase = repIndex < dictLimit ? dictBase : base; + const BYTE* repMatch = repBase + repIndex; + size_t mLength; + hashSmall[hSmall] = hashLong[hLong] = current; /* update hash table */ + + if ( (((U32)((dictLimit-1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > lowestIndex)) + && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) { + const BYTE* repMatchEnd = repIndex < dictLimit ? dictEnd : iend; + mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, lowPrefixPtr) + 4; + ip++; + ZSTD_storeSeq(seqStore, ip-anchor, anchor, 0, mLength-MINMATCH); + } else { + if ((matchLongIndex > lowestIndex) && (MEM_read64(matchLong) == MEM_read64(ip))) { + const BYTE* matchEnd = matchLongIndex < dictLimit ? dictEnd : iend; + const BYTE* lowMatchPtr = matchLongIndex < dictLimit ? 
dictStart : lowPrefixPtr; + U32 offset; + mLength = ZSTD_count_2segments(ip+8, matchLong+8, iend, matchEnd, lowPrefixPtr) + 8; + offset = current - matchLongIndex; + while (((ip>anchor) & (matchLong>lowMatchPtr)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */ + offset_2 = offset_1; + offset_1 = offset; + ZSTD_storeSeq(seqStore, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH); + + } else if ((matchIndex > lowestIndex) && (MEM_read32(match) == MEM_read32(ip))) { + size_t const h3 = ZSTD_hashPtr(ip+1, hBitsL, 8); + U32 const matchIndex3 = hashLong[h3]; + const BYTE* const match3Base = matchIndex3 < dictLimit ? dictBase : base; + const BYTE* match3 = match3Base + matchIndex3; + U32 offset; + hashLong[h3] = current + 1; + if ( (matchIndex3 > lowestIndex) && (MEM_read64(match3) == MEM_read64(ip+1)) ) { + const BYTE* matchEnd = matchIndex3 < dictLimit ? dictEnd : iend; + const BYTE* lowMatchPtr = matchIndex3 < dictLimit ? dictStart : lowPrefixPtr; + mLength = ZSTD_count_2segments(ip+9, match3+8, iend, matchEnd, lowPrefixPtr) + 8; + ip++; + offset = current+1 - matchIndex3; + while (((ip>anchor) & (match3>lowMatchPtr)) && (ip[-1] == match3[-1])) { ip--; match3--; mLength++; } /* catch up */ + } else { + const BYTE* matchEnd = matchIndex < dictLimit ? dictEnd : iend; + const BYTE* lowMatchPtr = matchIndex < dictLimit ? 
dictStart : lowPrefixPtr; + mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, lowPrefixPtr) + 4; + offset = current - matchIndex; + while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */ + } + offset_2 = offset_1; + offset_1 = offset; + ZSTD_storeSeq(seqStore, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH); + + } else { + ip += ((ip-anchor) >> kSearchStrength) + 1; + continue; + } } + + /* found a match : store it */ + ip += mLength; + anchor = ip; + + if (ip <= ilimit) { + /* Fill Table */ + hashSmall[ZSTD_hashPtr(base+current+2, hBitsS, mls)] = current+2; + hashLong[ZSTD_hashPtr(base+current+2, hBitsL, 8)] = current+2; + hashSmall[ZSTD_hashPtr(ip-2, hBitsS, mls)] = (U32)(ip-2-base); + hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = (U32)(ip-2-base); + /* check immediate repcode */ + while (ip <= ilimit) { + U32 const current2 = (U32)(ip-base); + U32 const repIndex2 = current2 - offset_2; + const BYTE* repMatch2 = repIndex2 < dictLimit ? dictBase + repIndex2 : base + repIndex2; + if ( (((U32)((dictLimit-1) - repIndex2) >= 3) & (repIndex2 > lowestIndex)) /* intentional overflow */ + && (MEM_read32(repMatch2) == MEM_read32(ip)) ) { + const BYTE* const repEnd2 = repIndex2 < dictLimit ? 
dictEnd : iend; + size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, lowPrefixPtr) + 4; + U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */ + ZSTD_storeSeq(seqStore, 0, anchor, 0, repLength2-MINMATCH); + hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2; + hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2; + ip += repLength2; + anchor = ip; + continue; + } + break; + } } } + + /* save reps for next block */ + rep[0] = offset_1; + rep[1] = offset_2; + + /* Return the last literals size */ + return iend - anchor; +} + + +size_t ZSTD_compressBlock_doubleFast_extDict( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize) +{ + U32 const mls = cParams->searchLength; + switch(mls) + { + default: /* includes case 3 */ + case 4 : + return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, cParams, src, srcSize, 4); + case 5 : + return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, cParams, src, srcSize, 5); + case 6 : + return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, cParams, src, srcSize, 6); + case 7 : + return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, cParams, src, srcSize, 7); + } +} diff --git a/vendor/github.com/DataDog/zstd/zstd_double_fast.h b/vendor/github.com/DataDog/zstd/zstd_double_fast.h new file mode 100644 index 00000000000..6d80b2774c0 --- /dev/null +++ b/vendor/github.com/DataDog/zstd/zstd_double_fast.h @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). 
+ * You may select, at your option, one of the above-listed licenses. + */ + +#ifndef ZSTD_DOUBLE_FAST_H +#define ZSTD_DOUBLE_FAST_H + +#if defined (__cplusplus) +extern "C" { +#endif + +#include "mem.h" /* U32 */ +#include "zstd_compress_internal.h" /* ZSTD_CCtx, size_t */ + +void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms, + ZSTD_compressionParameters const* cParams, + void const* end); +size_t ZSTD_compressBlock_doubleFast( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize); +size_t ZSTD_compressBlock_doubleFast_extDict( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize); + + +#if defined (__cplusplus) +} +#endif + +#endif /* ZSTD_DOUBLE_FAST_H */ diff --git a/vendor/github.com/DataDog/zstd/zstd_errors.h b/vendor/github.com/DataDog/zstd/zstd_errors.h new file mode 100644 index 00000000000..57533f28696 --- /dev/null +++ b/vendor/github.com/DataDog/zstd/zstd_errors.h @@ -0,0 +1,92 @@ +/* + * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. 
+ */ + +#ifndef ZSTD_ERRORS_H_398273423 +#define ZSTD_ERRORS_H_398273423 + +#if defined (__cplusplus) +extern "C" { +#endif + +/*===== dependency =====*/ +#include /* size_t */ + + +/* ===== ZSTDERRORLIB_API : control library symbols visibility ===== */ +#ifndef ZSTDERRORLIB_VISIBILITY +# if defined(__GNUC__) && (__GNUC__ >= 4) +# define ZSTDERRORLIB_VISIBILITY __attribute__ ((visibility ("default"))) +# else +# define ZSTDERRORLIB_VISIBILITY +# endif +#endif +#if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1) +# define ZSTDERRORLIB_API __declspec(dllexport) ZSTDERRORLIB_VISIBILITY +#elif defined(ZSTD_DLL_IMPORT) && (ZSTD_DLL_IMPORT==1) +# define ZSTDERRORLIB_API __declspec(dllimport) ZSTDERRORLIB_VISIBILITY /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/ +#else +# define ZSTDERRORLIB_API ZSTDERRORLIB_VISIBILITY +#endif + +/*-********************************************* + * Error codes list + *-********************************************* + * Error codes _values_ are pinned down since v1.3.1 only. + * Therefore, don't rely on values if you may link to any version < v1.3.1. + * + * Only values < 100 are considered stable. + * + * note 1 : this API shall be used with static linking only. + * dynamic linking is not yet officially supported. + * note 2 : Prefer relying on the enum than on its value whenever possible + * This is the only supported way to use the error list < v1.3.1 + * note 3 : ZSTD_isError() is always correct, whatever the library version. 
+ **********************************************/ +typedef enum { + ZSTD_error_no_error = 0, + ZSTD_error_GENERIC = 1, + ZSTD_error_prefix_unknown = 10, + ZSTD_error_version_unsupported = 12, + ZSTD_error_frameParameter_unsupported = 14, + ZSTD_error_frameParameter_windowTooLarge = 16, + ZSTD_error_corruption_detected = 20, + ZSTD_error_checksum_wrong = 22, + ZSTD_error_dictionary_corrupted = 30, + ZSTD_error_dictionary_wrong = 32, + ZSTD_error_dictionaryCreation_failed = 34, + ZSTD_error_parameter_unsupported = 40, + ZSTD_error_parameter_outOfBound = 42, + ZSTD_error_tableLog_tooLarge = 44, + ZSTD_error_maxSymbolValue_tooLarge = 46, + ZSTD_error_maxSymbolValue_tooSmall = 48, + ZSTD_error_stage_wrong = 60, + ZSTD_error_init_missing = 62, + ZSTD_error_memory_allocation = 64, + ZSTD_error_workSpace_tooSmall= 66, + ZSTD_error_dstSize_tooSmall = 70, + ZSTD_error_srcSize_wrong = 72, + /* following error codes are __NOT STABLE__, they can be removed or changed in future versions */ + ZSTD_error_frameIndex_tooLarge = 100, + ZSTD_error_seekableIO = 102, + ZSTD_error_maxCode = 120 /* never EVER use this value directly, it can change in future versions! Use ZSTD_isError() instead */ +} ZSTD_ErrorCode; + +/*! 
ZSTD_getErrorCode() : + convert a `size_t` function result into a `ZSTD_ErrorCode` enum type, + which can be used to compare with enum list published above */ +ZSTDERRORLIB_API ZSTD_ErrorCode ZSTD_getErrorCode(size_t functionResult); +ZSTDERRORLIB_API const char* ZSTD_getErrorString(ZSTD_ErrorCode code); /**< Same as ZSTD_getErrorName, but using a `ZSTD_ErrorCode` enum argument */ + + +#if defined (__cplusplus) +} +#endif + +#endif /* ZSTD_ERRORS_H_398273423 */ diff --git a/vendor/github.com/DataDog/zstd/zstd_fast.c b/vendor/github.com/DataDog/zstd/zstd_fast.c new file mode 100644 index 00000000000..df4d28b3402 --- /dev/null +++ b/vendor/github.com/DataDog/zstd/zstd_fast.c @@ -0,0 +1,259 @@ +/* + * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + +#include "zstd_compress_internal.h" +#include "zstd_fast.h" + + +void ZSTD_fillHashTable(ZSTD_matchState_t* ms, + ZSTD_compressionParameters const* cParams, + void const* end) +{ + U32* const hashTable = ms->hashTable; + U32 const hBits = cParams->hashLog; + U32 const mls = cParams->searchLength; + const BYTE* const base = ms->window.base; + const BYTE* ip = base + ms->nextToUpdate; + const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE; + const U32 fastHashFillStep = 3; + + /* Always insert every fastHashFillStep position into the hash table. + * Insert the other positions if their hash entry is empty. 
+ */ + for (; ip + fastHashFillStep - 1 <= iend; ip += fastHashFillStep) { + U32 const current = (U32)(ip - base); + U32 i; + for (i = 0; i < fastHashFillStep; ++i) { + size_t const hash = ZSTD_hashPtr(ip + i, hBits, mls); + if (i == 0 || hashTable[hash] == 0) + hashTable[hash] = current + i; + } + } +} + +FORCE_INLINE_TEMPLATE +size_t ZSTD_compressBlock_fast_generic( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize, + U32 const hlog, U32 const stepSize, U32 const mls) +{ + U32* const hashTable = ms->hashTable; + const BYTE* const base = ms->window.base; + const BYTE* const istart = (const BYTE*)src; + const BYTE* ip = istart; + const BYTE* anchor = istart; + const U32 lowestIndex = ms->window.dictLimit; + const BYTE* const lowest = base + lowestIndex; + const BYTE* const iend = istart + srcSize; + const BYTE* const ilimit = iend - HASH_READ_SIZE; + U32 offset_1=rep[0], offset_2=rep[1]; + U32 offsetSaved = 0; + + /* init */ + ip += (ip==lowest); + { U32 const maxRep = (U32)(ip-lowest); + if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0; + if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0; + } + + /* Main Search Loop */ + while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */ + size_t mLength; + size_t const h = ZSTD_hashPtr(ip, hlog, mls); + U32 const current = (U32)(ip-base); + U32 const matchIndex = hashTable[h]; + const BYTE* match = base + matchIndex; + hashTable[h] = current; /* update hash table */ + + if ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1))) { + mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4; + ip++; + ZSTD_storeSeq(seqStore, ip-anchor, anchor, 0, mLength-MINMATCH); + } else { + if ( (matchIndex <= lowestIndex) + || (MEM_read32(match) != MEM_read32(ip)) ) { + assert(stepSize >= 1); + ip += ((ip-anchor) >> kSearchStrength) + stepSize; + continue; + } + mLength = ZSTD_count(ip+4, match+4, iend) + 4; + { U32 const offset = 
(U32)(ip-match); + while (((ip>anchor) & (match>lowest)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */ + offset_2 = offset_1; + offset_1 = offset; + ZSTD_storeSeq(seqStore, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH); + } } + + /* match found */ + ip += mLength; + anchor = ip; + + if (ip <= ilimit) { + /* Fill Table */ + hashTable[ZSTD_hashPtr(base+current+2, hlog, mls)] = current+2; /* here because current+2 could be > iend-8 */ + hashTable[ZSTD_hashPtr(ip-2, hlog, mls)] = (U32)(ip-2-base); + /* check immediate repcode */ + while ( (ip <= ilimit) + && ( (offset_2>0) + & (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) { + /* store sequence */ + size_t const rLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4; + { U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; } /* swap offset_2 <=> offset_1 */ + hashTable[ZSTD_hashPtr(ip, hlog, mls)] = (U32)(ip-base); + ZSTD_storeSeq(seqStore, 0, anchor, 0, rLength-MINMATCH); + ip += rLength; + anchor = ip; + continue; /* faster when present ... (?) */ + } } } + + /* save reps for next block */ + rep[0] = offset_1 ? offset_1 : offsetSaved; + rep[1] = offset_2 ? 
offset_2 : offsetSaved; + + /* Return the last literals size */ + return iend - anchor; +} + + +size_t ZSTD_compressBlock_fast( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize) +{ + U32 const hlog = cParams->hashLog; + U32 const mls = cParams->searchLength; + U32 const stepSize = cParams->targetLength; + switch(mls) + { + default: /* includes case 3 */ + case 4 : + return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, hlog, stepSize, 4); + case 5 : + return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, hlog, stepSize, 5); + case 6 : + return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, hlog, stepSize, 6); + case 7 : + return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, hlog, stepSize, 7); + } +} + + +static size_t ZSTD_compressBlock_fast_extDict_generic( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize, + U32 const hlog, U32 const stepSize, U32 const mls) +{ + U32* hashTable = ms->hashTable; + const BYTE* const base = ms->window.base; + const BYTE* const dictBase = ms->window.dictBase; + const BYTE* const istart = (const BYTE*)src; + const BYTE* ip = istart; + const BYTE* anchor = istart; + const U32 lowestIndex = ms->window.lowLimit; + const BYTE* const dictStart = dictBase + lowestIndex; + const U32 dictLimit = ms->window.dictLimit; + const BYTE* const lowPrefixPtr = base + dictLimit; + const BYTE* const dictEnd = dictBase + dictLimit; + const BYTE* const iend = istart + srcSize; + const BYTE* const ilimit = iend - 8; + U32 offset_1=rep[0], offset_2=rep[1]; + + /* Search Loop */ + while (ip < ilimit) { /* < instead of <=, because (ip+1) */ + const size_t h = ZSTD_hashPtr(ip, hlog, mls); + const U32 matchIndex = hashTable[h]; + const BYTE* matchBase = matchIndex < dictLimit ? 
dictBase : base; + const BYTE* match = matchBase + matchIndex; + const U32 current = (U32)(ip-base); + const U32 repIndex = current + 1 - offset_1; /* offset_1 expected <= current +1 */ + const BYTE* repBase = repIndex < dictLimit ? dictBase : base; + const BYTE* repMatch = repBase + repIndex; + size_t mLength; + hashTable[h] = current; /* update hash table */ + + if ( (((U32)((dictLimit-1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > lowestIndex)) + && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) { + const BYTE* repMatchEnd = repIndex < dictLimit ? dictEnd : iend; + mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, lowPrefixPtr) + 4; + ip++; + ZSTD_storeSeq(seqStore, ip-anchor, anchor, 0, mLength-MINMATCH); + } else { + if ( (matchIndex < lowestIndex) || + (MEM_read32(match) != MEM_read32(ip)) ) { + assert(stepSize >= 1); + ip += ((ip-anchor) >> kSearchStrength) + stepSize; + continue; + } + { const BYTE* matchEnd = matchIndex < dictLimit ? dictEnd : iend; + const BYTE* lowMatchPtr = matchIndex < dictLimit ? dictStart : lowPrefixPtr; + U32 offset; + mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, lowPrefixPtr) + 4; + while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */ + offset = current - matchIndex; + offset_2 = offset_1; + offset_1 = offset; + ZSTD_storeSeq(seqStore, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH); + } } + + /* found a match : store it */ + ip += mLength; + anchor = ip; + + if (ip <= ilimit) { + /* Fill Table */ + hashTable[ZSTD_hashPtr(base+current+2, hlog, mls)] = current+2; + hashTable[ZSTD_hashPtr(ip-2, hlog, mls)] = (U32)(ip-2-base); + /* check immediate repcode */ + while (ip <= ilimit) { + U32 const current2 = (U32)(ip-base); + U32 const repIndex2 = current2 - offset_2; + const BYTE* repMatch2 = repIndex2 < dictLimit ? 
dictBase + repIndex2 : base + repIndex2; + if ( (((U32)((dictLimit-1) - repIndex2) >= 3) & (repIndex2 > lowestIndex)) /* intentional overflow */ + && (MEM_read32(repMatch2) == MEM_read32(ip)) ) { + const BYTE* const repEnd2 = repIndex2 < dictLimit ? dictEnd : iend; + size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, lowPrefixPtr) + 4; + U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */ + ZSTD_storeSeq(seqStore, 0, anchor, 0, repLength2-MINMATCH); + hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2; + ip += repLength2; + anchor = ip; + continue; + } + break; + } } } + + /* save reps for next block */ + rep[0] = offset_1; + rep[1] = offset_2; + + /* Return the last literals size */ + return iend - anchor; +} + + +size_t ZSTD_compressBlock_fast_extDict( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize) +{ + U32 const hlog = cParams->hashLog; + U32 const mls = cParams->searchLength; + U32 const stepSize = cParams->targetLength; + switch(mls) + { + default: /* includes case 3 */ + case 4 : + return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, hlog, stepSize, 4); + case 5 : + return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, hlog, stepSize, 5); + case 6 : + return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, hlog, stepSize, 6); + case 7 : + return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, hlog, stepSize, 7); + } +} diff --git a/vendor/github.com/DataDog/zstd/zstd_fast.h b/vendor/github.com/DataDog/zstd/zstd_fast.h new file mode 100644 index 00000000000..f0438ad5b41 --- /dev/null +++ b/vendor/github.com/DataDog/zstd/zstd_fast.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * All rights reserved. 
+ * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + +#ifndef ZSTD_FAST_H +#define ZSTD_FAST_H + +#if defined (__cplusplus) +extern "C" { +#endif + +#include "mem.h" /* U32 */ +#include "zstd_compress_internal.h" + +void ZSTD_fillHashTable(ZSTD_matchState_t* ms, + ZSTD_compressionParameters const* cParams, + void const* end); +size_t ZSTD_compressBlock_fast( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize); +size_t ZSTD_compressBlock_fast_extDict( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize); + +#if defined (__cplusplus) +} +#endif + +#endif /* ZSTD_FAST_H */ diff --git a/vendor/github.com/DataDog/zstd/zstd_internal.h b/vendor/github.com/DataDog/zstd/zstd_internal.h new file mode 100644 index 00000000000..65c08a82570 --- /dev/null +++ b/vendor/github.com/DataDog/zstd/zstd_internal.h @@ -0,0 +1,290 @@ +/* + * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + +#ifndef ZSTD_CCOMMON_H_MODULE +#define ZSTD_CCOMMON_H_MODULE + +/* this module contains definitions which must be identical + * across compression, decompression and dictBuilder. 
+ * It also contains a few functions useful to at least 2 of them + * and which benefit from being inlined */ + +/*-************************************* +* Dependencies +***************************************/ +#include "compiler.h" +#include "mem.h" +#include "error_private.h" +#define ZSTD_STATIC_LINKING_ONLY +#include "zstd.h" +#define FSE_STATIC_LINKING_ONLY +#include "fse.h" +#define HUF_STATIC_LINKING_ONLY +#include "huf.h" +#ifndef XXH_STATIC_LINKING_ONLY +# define XXH_STATIC_LINKING_ONLY /* XXH64_state_t */ +#endif +#include "xxhash.h" /* XXH_reset, update, digest */ + + +#if defined (__cplusplus) +extern "C" { +#endif + + +/*-************************************* +* Debug +***************************************/ +#if defined(ZSTD_DEBUG) && (ZSTD_DEBUG>=1) +# include +#else +# ifndef assert +# define assert(condition) ((void)0) +# endif +#endif + +#define ZSTD_STATIC_ASSERT(c) { enum { ZSTD_static_assert = 1/(int)(!!(c)) }; } + +#if defined(ZSTD_DEBUG) && (ZSTD_DEBUG>=2) +# include +extern int g_debuglog_enable; +/* recommended values for ZSTD_DEBUG display levels : + * 1 : no display, enables assert() only + * 2 : reserved for currently active debug path + * 3 : events once per object lifetime (CCtx, CDict, etc.) + * 4 : events once per frame + * 5 : events once per block + * 6 : events once per sequence (*very* verbose) */ +# define RAWLOG(l, ...) { \ + if ((g_debuglog_enable) & (l<=ZSTD_DEBUG)) { \ + fprintf(stderr, __VA_ARGS__); \ + } } +# define DEBUGLOG(l, ...) { \ + if ((g_debuglog_enable) & (l<=ZSTD_DEBUG)) { \ + fprintf(stderr, __FILE__ ": " __VA_ARGS__); \ + fprintf(stderr, " \n"); \ + } } +#else +# define RAWLOG(l, ...) {} /* disabled */ +# define DEBUGLOG(l, ...) {} /* disabled */ +#endif + + +/*-************************************* +* shared macros +***************************************/ +#undef MIN +#undef MAX +#define MIN(a,b) ((a)<(b) ? (a) : (b)) +#define MAX(a,b) ((a)>(b) ? 
(a) : (b)) +#define CHECK_F(f) { size_t const errcod = f; if (ERR_isError(errcod)) return errcod; } /* check and Forward error code */ +#define CHECK_E(f, e) { size_t const errcod = f; if (ERR_isError(errcod)) return ERROR(e); } /* check and send Error code */ + + +/*-************************************* +* Common constants +***************************************/ +#define ZSTD_OPT_NUM (1<<12) + +#define ZSTD_REP_NUM 3 /* number of repcodes */ +#define ZSTD_REP_MOVE (ZSTD_REP_NUM-1) +static const U32 repStartValue[ZSTD_REP_NUM] = { 1, 4, 8 }; + +#define KB *(1 <<10) +#define MB *(1 <<20) +#define GB *(1U<<30) + +#define BIT7 128 +#define BIT6 64 +#define BIT5 32 +#define BIT4 16 +#define BIT1 2 +#define BIT0 1 + +#define ZSTD_WINDOWLOG_ABSOLUTEMIN 10 +#define ZSTD_WINDOWLOG_DEFAULTMAX 27 /* Default maximum allowed window log */ +static const size_t ZSTD_fcs_fieldSize[4] = { 0, 2, 4, 8 }; +static const size_t ZSTD_did_fieldSize[4] = { 0, 1, 2, 4 }; + +#define ZSTD_FRAMEIDSIZE 4 +static const size_t ZSTD_frameIdSize = ZSTD_FRAMEIDSIZE; /* magic number size */ + +#define ZSTD_BLOCKHEADERSIZE 3 /* C standard doesn't allow `static const` variable to be init using another `static const` variable */ +static const size_t ZSTD_blockHeaderSize = ZSTD_BLOCKHEADERSIZE; +typedef enum { bt_raw, bt_rle, bt_compressed, bt_reserved } blockType_e; + +#define MIN_SEQUENCES_SIZE 1 /* nbSeq==0 */ +#define MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */ + MIN_SEQUENCES_SIZE /* nbSeq==0 */) /* for a non-null block */ + +#define HufLog 12 +typedef enum { set_basic, set_rle, set_compressed, set_repeat } symbolEncodingType_e; + +#define LONGNBSEQ 0x7F00 + +#define MINMATCH 3 + +#define Litbits 8 +#define MaxLit ((1<= 3) /* GCC Intrinsic */ + return 31 - __builtin_clz(val); +# else /* Software version */ + static const U32 DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 }; + U32 v = val; + v |= v 
>> 1; + v |= v >> 2; + v |= v >> 4; + v |= v >> 8; + v |= v >> 16; + return DeBruijnClz[(v * 0x07C4ACDDU) >> 27]; +# endif + } +} + + +/* ZSTD_invalidateRepCodes() : + * ensures next compression will not use repcodes from previous block. + * Note : only works with regular variant; + * do not use with extDict variant ! */ +void ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx); /* zstdmt, adaptive_compression (shouldn't get this definition from here) */ + + +typedef struct { + blockType_e blockType; + U32 lastBlock; + U32 origSize; +} blockProperties_t; + +/*! ZSTD_getcBlockSize() : + * Provides the size of compressed block from block header `src` */ +/* Used by: decompress, fullbench (does not get its definition from here) */ +size_t ZSTD_getcBlockSize(const void* src, size_t srcSize, + blockProperties_t* bpPtr); + +#if defined (__cplusplus) +} +#endif + +#endif /* ZSTD_CCOMMON_H_MODULE */ diff --git a/vendor/github.com/DataDog/zstd/zstd_lazy.c b/vendor/github.com/DataDog/zstd/zstd_lazy.c new file mode 100644 index 00000000000..9f158123f07 --- /dev/null +++ b/vendor/github.com/DataDog/zstd/zstd_lazy.c @@ -0,0 +1,824 @@ +/* + * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. 
+ */ + +#include "zstd_compress_internal.h" +#include "zstd_lazy.h" + + +/*-************************************* +* Binary Tree search +***************************************/ + +void ZSTD_updateDUBT( + ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams, + const BYTE* ip, const BYTE* iend, + U32 mls) +{ + U32* const hashTable = ms->hashTable; + U32 const hashLog = cParams->hashLog; + + U32* const bt = ms->chainTable; + U32 const btLog = cParams->chainLog - 1; + U32 const btMask = (1 << btLog) - 1; + + const BYTE* const base = ms->window.base; + U32 const target = (U32)(ip - base); + U32 idx = ms->nextToUpdate; + + if (idx != target) + DEBUGLOG(7, "ZSTD_updateDUBT, from %u to %u (dictLimit:%u)", + idx, target, ms->window.dictLimit); + assert(ip + 8 <= iend); /* condition for ZSTD_hashPtr */ + (void)iend; + + assert(idx >= ms->window.dictLimit); /* condition for valid base+idx */ + for ( ; idx < target ; idx++) { + size_t const h = ZSTD_hashPtr(base + idx, hashLog, mls); /* assumption : ip + 8 <= iend */ + U32 const matchIndex = hashTable[h]; + + U32* const nextCandidatePtr = bt + 2*(idx&btMask); + U32* const sortMarkPtr = nextCandidatePtr + 1; + + DEBUGLOG(8, "ZSTD_updateDUBT: insert %u", idx); + hashTable[h] = idx; /* Update Hash Table */ + *nextCandidatePtr = matchIndex; /* update BT like a chain */ + *sortMarkPtr = ZSTD_DUBT_UNSORTED_MARK; + } + ms->nextToUpdate = target; +} + + +/** ZSTD_insertDUBT1() : + * sort one already inserted but unsorted position + * assumption : current >= btlow == (current - btmask) + * doesn't fail */ +static void ZSTD_insertDUBT1( + ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams, + U32 current, const BYTE* inputEnd, + U32 nbCompares, U32 btLow, int extDict) +{ + U32* const bt = ms->chainTable; + U32 const btLog = cParams->chainLog - 1; + U32 const btMask = (1 << btLog) - 1; + size_t commonLengthSmaller=0, commonLengthLarger=0; + const BYTE* const base = ms->window.base; + const BYTE* const dictBase = 
ms->window.dictBase; + const U32 dictLimit = ms->window.dictLimit; + const BYTE* const ip = (current>=dictLimit) ? base + current : dictBase + current; + const BYTE* const iend = (current>=dictLimit) ? inputEnd : dictBase + dictLimit; + const BYTE* const dictEnd = dictBase + dictLimit; + const BYTE* const prefixStart = base + dictLimit; + const BYTE* match; + U32* smallerPtr = bt + 2*(current&btMask); + U32* largerPtr = smallerPtr + 1; + U32 matchIndex = *smallerPtr; + U32 dummy32; /* to be nullified at the end */ + U32 const windowLow = ms->window.lowLimit; + + DEBUGLOG(8, "ZSTD_insertDUBT1(%u) (dictLimit=%u, lowLimit=%u)", + current, dictLimit, windowLow); + assert(current >= btLow); + assert(ip < iend); /* condition for ZSTD_count */ + + while (nbCompares-- && (matchIndex > windowLow)) { + U32* const nextPtr = bt + 2*(matchIndex & btMask); + size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */ + assert(matchIndex < current); + + if ( (!extDict) + || (matchIndex+matchLength >= dictLimit) /* both in current segment*/ + || (current < dictLimit) /* both in extDict */) { + const BYTE* const mBase = !extDict || ((matchIndex+matchLength) >= dictLimit) ? 
base : dictBase; + assert( (matchIndex+matchLength >= dictLimit) /* might be wrong if extDict is incorrectly set to 0 */ + || (current < dictLimit) ); + match = mBase + matchIndex; + matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend); + } else { + match = dictBase + matchIndex; + matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart); + if (matchIndex+matchLength >= dictLimit) + match = base + matchIndex; /* to prepare for next usage of match[matchLength] */ + } + + DEBUGLOG(8, "ZSTD_insertDUBT1: comparing %u with %u : found %u common bytes ", + current, matchIndex, (U32)matchLength); + + if (ip+matchLength == iend) { /* equal : no way to know if inf or sup */ + break; /* drop , to guarantee consistency ; miss a bit of compression, but other solutions can corrupt tree */ + } + + if (match[matchLength] < ip[matchLength]) { /* necessarily within buffer */ + /* match is smaller than current */ + *smallerPtr = matchIndex; /* update smaller idx */ + commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */ + if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop searching */ + DEBUGLOG(8, "ZSTD_insertDUBT1: %u (>btLow=%u) is smaller : next => %u", + matchIndex, btLow, nextPtr[1]); + smallerPtr = nextPtr+1; /* new "candidate" => larger than match, which was smaller than target */ + matchIndex = nextPtr[1]; /* new matchIndex, larger than previous and closer to current */ + } else { + /* match is larger than current */ + *largerPtr = matchIndex; + commonLengthLarger = matchLength; + if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop searching */ + DEBUGLOG(8, "ZSTD_insertDUBT1: %u (>btLow=%u) is larger => %u", + matchIndex, btLow, nextPtr[0]); + largerPtr = nextPtr; + matchIndex = nextPtr[0]; + } } + + *smallerPtr = *largerPtr = 0; +} + + +static size_t ZSTD_DUBT_findBestMatch ( + ZSTD_matchState_t* ms, 
ZSTD_compressionParameters const* cParams, + const BYTE* const ip, const BYTE* const iend, + size_t* offsetPtr, + U32 const mls, + U32 const extDict) +{ + U32* const hashTable = ms->hashTable; + U32 const hashLog = cParams->hashLog; + size_t const h = ZSTD_hashPtr(ip, hashLog, mls); + U32 matchIndex = hashTable[h]; + + const BYTE* const base = ms->window.base; + U32 const current = (U32)(ip-base); + U32 const windowLow = ms->window.lowLimit; + + U32* const bt = ms->chainTable; + U32 const btLog = cParams->chainLog - 1; + U32 const btMask = (1 << btLog) - 1; + U32 const btLow = (btMask >= current) ? 0 : current - btMask; + U32 const unsortLimit = MAX(btLow, windowLow); + + U32* nextCandidate = bt + 2*(matchIndex&btMask); + U32* unsortedMark = bt + 2*(matchIndex&btMask) + 1; + U32 nbCompares = 1U << cParams->searchLog; + U32 nbCandidates = nbCompares; + U32 previousCandidate = 0; + + DEBUGLOG(7, "ZSTD_DUBT_findBestMatch (%u) ", current); + assert(ip <= iend-8); /* required for h calculation */ + + /* reach end of unsorted candidates list */ + while ( (matchIndex > unsortLimit) + && (*unsortedMark == ZSTD_DUBT_UNSORTED_MARK) + && (nbCandidates > 1) ) { + DEBUGLOG(8, "ZSTD_DUBT_findBestMatch: candidate %u is unsorted", + matchIndex); + *unsortedMark = previousCandidate; + previousCandidate = matchIndex; + matchIndex = *nextCandidate; + nextCandidate = bt + 2*(matchIndex&btMask); + unsortedMark = bt + 2*(matchIndex&btMask) + 1; + nbCandidates --; + } + + if ( (matchIndex > unsortLimit) + && (*unsortedMark==ZSTD_DUBT_UNSORTED_MARK) ) { + DEBUGLOG(7, "ZSTD_DUBT_findBestMatch: nullify last unsorted candidate %u", + matchIndex); + *nextCandidate = *unsortedMark = 0; /* nullify next candidate if it's still unsorted (note : simplification, detrimental to compression ratio, beneficial for speed) */ + } + + /* batch sort stacked candidates */ + matchIndex = previousCandidate; + while (matchIndex) { /* will end on matchIndex == 0 */ + U32* const nextCandidateIdxPtr = bt + 
2*(matchIndex&btMask) + 1; + U32 const nextCandidateIdx = *nextCandidateIdxPtr; + ZSTD_insertDUBT1(ms, cParams, matchIndex, iend, + nbCandidates, unsortLimit, extDict); + matchIndex = nextCandidateIdx; + nbCandidates++; + } + + /* find longest match */ + { size_t commonLengthSmaller=0, commonLengthLarger=0; + const BYTE* const dictBase = ms->window.dictBase; + const U32 dictLimit = ms->window.dictLimit; + const BYTE* const dictEnd = dictBase + dictLimit; + const BYTE* const prefixStart = base + dictLimit; + U32* smallerPtr = bt + 2*(current&btMask); + U32* largerPtr = bt + 2*(current&btMask) + 1; + U32 matchEndIdx = current+8+1; + U32 dummy32; /* to be nullified at the end */ + size_t bestLength = 0; + + matchIndex = hashTable[h]; + hashTable[h] = current; /* Update Hash Table */ + + while (nbCompares-- && (matchIndex > windowLow)) { + U32* const nextPtr = bt + 2*(matchIndex & btMask); + size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */ + const BYTE* match; + + if ((!extDict) || (matchIndex+matchLength >= dictLimit)) { + match = base + matchIndex; + matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend); + } else { + match = dictBase + matchIndex; + matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart); + if (matchIndex+matchLength >= dictLimit) + match = base + matchIndex; /* to prepare for next usage of match[matchLength] */ + } + + if (matchLength > bestLength) { + if (matchLength > matchEndIdx - matchIndex) + matchEndIdx = matchIndex + (U32)matchLength; + if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(current-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) ) + bestLength = matchLength, *offsetPtr = ZSTD_REP_MOVE + current - matchIndex; + if (ip+matchLength == iend) { /* equal : no way to know if inf or sup */ + break; /* drop, to guarantee consistency (miss a little bit of compression) */ + } + } + + if (match[matchLength] < 
ip[matchLength]) { + /* match is smaller than current */ + *smallerPtr = matchIndex; /* update smaller idx */ + commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */ + if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop the search */ + smallerPtr = nextPtr+1; /* new "smaller" => larger of match */ + matchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */ + } else { + /* match is larger than current */ + *largerPtr = matchIndex; + commonLengthLarger = matchLength; + if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop the search */ + largerPtr = nextPtr; + matchIndex = nextPtr[0]; + } } + + *smallerPtr = *largerPtr = 0; + + assert(matchEndIdx > current+8); /* ensure nextToUpdate is increased */ + ms->nextToUpdate = matchEndIdx - 8; /* skip repetitive patterns */ + if (bestLength >= MINMATCH) { + U32 const mIndex = current - ((U32)*offsetPtr - ZSTD_REP_MOVE); (void)mIndex; + DEBUGLOG(8, "ZSTD_DUBT_findBestMatch(%u) : found match of length %u and offsetCode %u (pos %u)", + current, (U32)bestLength, (U32)*offsetPtr, mIndex); + } + return bestLength; + } +} + + +/** ZSTD_BtFindBestMatch() : Tree updater, providing best match */ +static size_t ZSTD_BtFindBestMatch ( + ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams, + const BYTE* const ip, const BYTE* const iLimit, + size_t* offsetPtr, + const U32 mls /* template */) +{ + DEBUGLOG(7, "ZSTD_BtFindBestMatch"); + if (ip < ms->window.base + ms->nextToUpdate) return 0; /* skipped area */ + ZSTD_updateDUBT(ms, cParams, ip, iLimit, mls); + return ZSTD_DUBT_findBestMatch(ms, cParams, ip, iLimit, offsetPtr, mls, 0); +} + + +static size_t ZSTD_BtFindBestMatch_selectMLS ( + ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams, + const BYTE* ip, const BYTE* const iLimit, + size_t* offsetPtr) +{ + switch(cParams->searchLength) + { + default : /* includes case 3 */ 
+ case 4 : return ZSTD_BtFindBestMatch(ms, cParams, ip, iLimit, offsetPtr, 4); + case 5 : return ZSTD_BtFindBestMatch(ms, cParams, ip, iLimit, offsetPtr, 5); + case 7 : + case 6 : return ZSTD_BtFindBestMatch(ms, cParams, ip, iLimit, offsetPtr, 6); + } +} + + +/** Tree updater, providing best match */ +static size_t ZSTD_BtFindBestMatch_extDict ( + ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams, + const BYTE* const ip, const BYTE* const iLimit, + size_t* offsetPtr, + const U32 mls) +{ + DEBUGLOG(7, "ZSTD_BtFindBestMatch_extDict"); + if (ip < ms->window.base + ms->nextToUpdate) return 0; /* skipped area */ + ZSTD_updateDUBT(ms, cParams, ip, iLimit, mls); + return ZSTD_DUBT_findBestMatch(ms, cParams, ip, iLimit, offsetPtr, mls, 1); +} + + +static size_t ZSTD_BtFindBestMatch_selectMLS_extDict ( + ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams, + const BYTE* ip, const BYTE* const iLimit, + size_t* offsetPtr) +{ + switch(cParams->searchLength) + { + default : /* includes case 3 */ + case 4 : return ZSTD_BtFindBestMatch_extDict(ms, cParams, ip, iLimit, offsetPtr, 4); + case 5 : return ZSTD_BtFindBestMatch_extDict(ms, cParams, ip, iLimit, offsetPtr, 5); + case 7 : + case 6 : return ZSTD_BtFindBestMatch_extDict(ms, cParams, ip, iLimit, offsetPtr, 6); + } +} + + + +/* ********************************* +* Hash Chain +***********************************/ +#define NEXT_IN_CHAIN(d, mask) chainTable[(d) & mask] + +/* Update chains up to ip (excluded) + Assumption : always within prefix (i.e. 
not within extDict) */ +static U32 ZSTD_insertAndFindFirstIndex_internal( + ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams, + const BYTE* ip, U32 const mls) +{ + U32* const hashTable = ms->hashTable; + const U32 hashLog = cParams->hashLog; + U32* const chainTable = ms->chainTable; + const U32 chainMask = (1 << cParams->chainLog) - 1; + const BYTE* const base = ms->window.base; + const U32 target = (U32)(ip - base); + U32 idx = ms->nextToUpdate; + + while(idx < target) { /* catch up */ + size_t const h = ZSTD_hashPtr(base+idx, hashLog, mls); + NEXT_IN_CHAIN(idx, chainMask) = hashTable[h]; + hashTable[h] = idx; + idx++; + } + + ms->nextToUpdate = target; + return hashTable[ZSTD_hashPtr(ip, hashLog, mls)]; +} + +U32 ZSTD_insertAndFindFirstIndex( + ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams, + const BYTE* ip) +{ + return ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, cParams->searchLength); +} + + +/* inlining is important to hardwire a hot branch (template emulation) */ +FORCE_INLINE_TEMPLATE +size_t ZSTD_HcFindBestMatch_generic ( + ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams, + const BYTE* const ip, const BYTE* const iLimit, + size_t* offsetPtr, + const U32 mls, const U32 extDict) +{ + U32* const chainTable = ms->chainTable; + const U32 chainSize = (1 << cParams->chainLog); + const U32 chainMask = chainSize-1; + const BYTE* const base = ms->window.base; + const BYTE* const dictBase = ms->window.dictBase; + const U32 dictLimit = ms->window.dictLimit; + const BYTE* const prefixStart = base + dictLimit; + const BYTE* const dictEnd = dictBase + dictLimit; + const U32 lowLimit = ms->window.lowLimit; + const U32 current = (U32)(ip-base); + const U32 minChain = current > chainSize ? 
current - chainSize : 0; + U32 nbAttempts = 1U << cParams->searchLog; + size_t ml=4-1; + + /* HC4 match finder */ + U32 matchIndex = ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, mls); + + for ( ; (matchIndex>lowLimit) & (nbAttempts>0) ; nbAttempts--) { + size_t currentMl=0; + if ((!extDict) || matchIndex >= dictLimit) { + const BYTE* const match = base + matchIndex; + if (match[ml] == ip[ml]) /* potentially better */ + currentMl = ZSTD_count(ip, match, iLimit); + } else { + const BYTE* const match = dictBase + matchIndex; + assert(match+4 <= dictEnd); + if (MEM_read32(match) == MEM_read32(ip)) /* assumption : matchIndex <= dictLimit-4 (by table construction) */ + currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, dictEnd, prefixStart) + 4; + } + + /* save best solution */ + if (currentMl > ml) { + ml = currentMl; + *offsetPtr = current - matchIndex + ZSTD_REP_MOVE; + if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */ + } + + if (matchIndex <= minChain) break; + matchIndex = NEXT_IN_CHAIN(matchIndex, chainMask); + } + + return ml; +} + + +FORCE_INLINE_TEMPLATE size_t ZSTD_HcFindBestMatch_selectMLS ( + ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams, + const BYTE* ip, const BYTE* const iLimit, + size_t* offsetPtr) +{ + switch(cParams->searchLength) + { + default : /* includes case 3 */ + case 4 : return ZSTD_HcFindBestMatch_generic(ms, cParams, ip, iLimit, offsetPtr, 4, 0); + case 5 : return ZSTD_HcFindBestMatch_generic(ms, cParams, ip, iLimit, offsetPtr, 5, 0); + case 7 : + case 6 : return ZSTD_HcFindBestMatch_generic(ms, cParams, ip, iLimit, offsetPtr, 6, 0); + } +} + + +FORCE_INLINE_TEMPLATE size_t ZSTD_HcFindBestMatch_extDict_selectMLS ( + ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams, + const BYTE* ip, const BYTE* const iLimit, + size_t* const offsetPtr) +{ + switch(cParams->searchLength) + { + default : /* includes case 3 */ + case 4 : return 
ZSTD_HcFindBestMatch_generic(ms, cParams, ip, iLimit, offsetPtr, 4, 1); + case 5 : return ZSTD_HcFindBestMatch_generic(ms, cParams, ip, iLimit, offsetPtr, 5, 1); + case 7 : + case 6 : return ZSTD_HcFindBestMatch_generic(ms, cParams, ip, iLimit, offsetPtr, 6, 1); + } +} + + +/* ******************************* +* Common parser - lazy strategy +*********************************/ +FORCE_INLINE_TEMPLATE +size_t ZSTD_compressBlock_lazy_generic( + ZSTD_matchState_t* ms, seqStore_t* seqStore, + U32 rep[ZSTD_REP_NUM], + ZSTD_compressionParameters const* cParams, + const void* src, size_t srcSize, + const U32 searchMethod, const U32 depth) +{ + const BYTE* const istart = (const BYTE*)src; + const BYTE* ip = istart; + const BYTE* anchor = istart; + const BYTE* const iend = istart + srcSize; + const BYTE* const ilimit = iend - 8; + const BYTE* const base = ms->window.base + ms->window.dictLimit; + + typedef size_t (*searchMax_f)( + ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams, + const BYTE* ip, const BYTE* iLimit, size_t* offsetPtr); + searchMax_f const searchMax = searchMethod ? 
ZSTD_BtFindBestMatch_selectMLS : ZSTD_HcFindBestMatch_selectMLS; + U32 offset_1 = rep[0], offset_2 = rep[1], savedOffset=0; + + /* init */ + ip += (ip==base); + ms->nextToUpdate3 = ms->nextToUpdate; + { U32 const maxRep = (U32)(ip-base); + if (offset_2 > maxRep) savedOffset = offset_2, offset_2 = 0; + if (offset_1 > maxRep) savedOffset = offset_1, offset_1 = 0; + } + + /* Match Loop */ + while (ip < ilimit) { + size_t matchLength=0; + size_t offset=0; + const BYTE* start=ip+1; + + /* check repCode */ + if ((offset_1>0) & (MEM_read32(ip+1) == MEM_read32(ip+1 - offset_1))) { + /* repcode : we take it */ + matchLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4; + if (depth==0) goto _storeSequence; + } + + /* first search (depth 0) */ + { size_t offsetFound = 99999999; + size_t const ml2 = searchMax(ms, cParams, ip, iend, &offsetFound); + if (ml2 > matchLength) + matchLength = ml2, start = ip, offset=offsetFound; + } + + if (matchLength < 4) { + ip += ((ip-anchor) >> kSearchStrength) + 1; /* jump faster over incompressible sections */ + continue; + } + + /* let's try to find a better solution */ + if (depth>=1) + while (ip0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) { + size_t const mlRep = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4; + int const gain2 = (int)(mlRep * 3); + int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1); + if ((mlRep >= 4) && (gain2 > gain1)) + matchLength = mlRep, offset = 0, start = ip; + } + { size_t offset2=99999999; + size_t const ml2 = searchMax(ms, cParams, ip, iend, &offset2); + int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */ + int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4); + if ((ml2 >= 4) && (gain2 > gain1)) { + matchLength = ml2, offset = offset2, start = ip; + continue; /* search a better one */ + } } + + /* let's find an even better one */ + if ((depth==2) && (ip0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) { + size_t const ml2 = 
ZSTD_count(ip+4, ip+4-offset_1, iend) + 4; + int const gain2 = (int)(ml2 * 4); + int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1); + if ((ml2 >= 4) && (gain2 > gain1)) + matchLength = ml2, offset = 0, start = ip; + } + { size_t offset2=99999999; + size_t const ml2 = searchMax(ms, cParams, ip, iend, &offset2); + int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */ + int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 7); + if ((ml2 >= 4) && (gain2 > gain1)) { + matchLength = ml2, offset = offset2, start = ip; + continue; + } } } + break; /* nothing found : store previous solution */ + } + + /* NOTE: + * start[-offset+ZSTD_REP_MOVE-1] is undefined behavior. + * (-offset+ZSTD_REP_MOVE-1) is unsigned, and is added to start, which + * overflows the pointer, which is undefined behavior. + */ + /* catch up */ + if (offset) { + while ( ((start > anchor) & (start - (offset-ZSTD_REP_MOVE) > base)) + && (start[-1] == (start-(offset-ZSTD_REP_MOVE))[-1]) ) /* only search for offset within prefix */ + { start--; matchLength++; } + offset_2 = offset_1; offset_1 = (U32)(offset - ZSTD_REP_MOVE); + } + /* store sequence */ +_storeSequence: + { size_t const litLength = start - anchor; + ZSTD_storeSeq(seqStore, litLength, anchor, (U32)offset, matchLength-MINMATCH); + anchor = ip = start + matchLength; + } + + /* check immediate repcode */ + while ( ((ip <= ilimit) & (offset_2>0)) + && (MEM_read32(ip) == MEM_read32(ip - offset_2)) ) { + /* store sequence */ + matchLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4; + offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset; /* swap repcodes */ + ZSTD_storeSeq(seqStore, 0, anchor, 0, matchLength-MINMATCH); + ip += matchLength; + anchor = ip; + continue; /* faster when present ... (?) */ + } } + + /* Save reps for next block */ + rep[0] = offset_1 ? offset_1 : savedOffset; + rep[1] = offset_2 ? 
offset_2 : savedOffset; + + /* Return the last literals size */ + return iend - anchor; +} + + +size_t ZSTD_compressBlock_btlazy2( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize) +{ + return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, cParams, src, srcSize, 1, 2); +} + +size_t ZSTD_compressBlock_lazy2( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize) +{ + return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, cParams, src, srcSize, 0, 2); +} + +size_t ZSTD_compressBlock_lazy( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize) +{ + return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, cParams, src, srcSize, 0, 1); +} + +size_t ZSTD_compressBlock_greedy( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize) +{ + return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, cParams, src, srcSize, 0, 0); +} + + +FORCE_INLINE_TEMPLATE +size_t ZSTD_compressBlock_lazy_extDict_generic( + ZSTD_matchState_t* ms, seqStore_t* seqStore, + U32 rep[ZSTD_REP_NUM], + ZSTD_compressionParameters const* cParams, + const void* src, size_t srcSize, + const U32 searchMethod, const U32 depth) +{ + const BYTE* const istart = (const BYTE*)src; + const BYTE* ip = istart; + const BYTE* anchor = istart; + const BYTE* const iend = istart + srcSize; + const BYTE* const ilimit = iend - 8; + const BYTE* const base = ms->window.base; + const U32 dictLimit = ms->window.dictLimit; + const U32 lowestIndex = ms->window.lowLimit; + const BYTE* const prefixStart = base + dictLimit; + const BYTE* const dictBase = ms->window.dictBase; + const BYTE* const dictEnd = dictBase + dictLimit; + const BYTE* const 
dictStart = dictBase + lowestIndex; + + typedef size_t (*searchMax_f)( + ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams, + const BYTE* ip, const BYTE* iLimit, size_t* offsetPtr); + searchMax_f searchMax = searchMethod ? ZSTD_BtFindBestMatch_selectMLS_extDict : ZSTD_HcFindBestMatch_extDict_selectMLS; + + U32 offset_1 = rep[0], offset_2 = rep[1]; + + /* init */ + ms->nextToUpdate3 = ms->nextToUpdate; + ip += (ip == prefixStart); + + /* Match Loop */ + while (ip < ilimit) { + size_t matchLength=0; + size_t offset=0; + const BYTE* start=ip+1; + U32 current = (U32)(ip-base); + + /* check repCode */ + { const U32 repIndex = (U32)(current+1 - offset_1); + const BYTE* const repBase = repIndex < dictLimit ? dictBase : base; + const BYTE* const repMatch = repBase + repIndex; + if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */ + if (MEM_read32(ip+1) == MEM_read32(repMatch)) { + /* repcode detected we should take it */ + const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend; + matchLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repEnd, prefixStart) + 4; + if (depth==0) goto _storeSequence; + } } + + /* first search (depth 0) */ + { size_t offsetFound = 99999999; + size_t const ml2 = searchMax(ms, cParams, ip, iend, &offsetFound); + if (ml2 > matchLength) + matchLength = ml2, start = ip, offset=offsetFound; + } + + if (matchLength < 4) { + ip += ((ip-anchor) >> kSearchStrength) + 1; /* jump faster over incompressible sections */ + continue; + } + + /* let's try to find a better solution */ + if (depth>=1) + while (ip= 3) & (repIndex > lowestIndex)) /* intentional overflow */ + if (MEM_read32(ip) == MEM_read32(repMatch)) { + /* repcode detected */ + const BYTE* const repEnd = repIndex < dictLimit ? 
dictEnd : iend; + size_t const repLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4; + int const gain2 = (int)(repLength * 3); + int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1); + if ((repLength >= 4) && (gain2 > gain1)) + matchLength = repLength, offset = 0, start = ip; + } } + + /* search match, depth 1 */ + { size_t offset2=99999999; + size_t const ml2 = searchMax(ms, cParams, ip, iend, &offset2); + int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */ + int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4); + if ((ml2 >= 4) && (gain2 > gain1)) { + matchLength = ml2, offset = offset2, start = ip; + continue; /* search a better one */ + } } + + /* let's find an even better one */ + if ((depth==2) && (ip= 3) & (repIndex > lowestIndex)) /* intentional overflow */ + if (MEM_read32(ip) == MEM_read32(repMatch)) { + /* repcode detected */ + const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend; + size_t const repLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4; + int const gain2 = (int)(repLength * 4); + int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1); + if ((repLength >= 4) && (gain2 > gain1)) + matchLength = repLength, offset = 0, start = ip; + } } + + /* search match, depth 2 */ + { size_t offset2=99999999; + size_t const ml2 = searchMax(ms, cParams, ip, iend, &offset2); + int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */ + int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 7); + if ((ml2 >= 4) && (gain2 > gain1)) { + matchLength = ml2, offset = offset2, start = ip; + continue; + } } } + break; /* nothing found : store previous solution */ + } + + /* catch up */ + if (offset) { + U32 const matchIndex = (U32)((start-base) - (offset - ZSTD_REP_MOVE)); + const BYTE* match = (matchIndex < dictLimit) ? 
dictBase + matchIndex : base + matchIndex; + const BYTE* const mStart = (matchIndex < dictLimit) ? dictStart : prefixStart; + while ((start>anchor) && (match>mStart) && (start[-1] == match[-1])) { start--; match--; matchLength++; } /* catch up */ + offset_2 = offset_1; offset_1 = (U32)(offset - ZSTD_REP_MOVE); + } + + /* store sequence */ +_storeSequence: + { size_t const litLength = start - anchor; + ZSTD_storeSeq(seqStore, litLength, anchor, (U32)offset, matchLength-MINMATCH); + anchor = ip = start + matchLength; + } + + /* check immediate repcode */ + while (ip <= ilimit) { + const U32 repIndex = (U32)((ip-base) - offset_2); + const BYTE* const repBase = repIndex < dictLimit ? dictBase : base; + const BYTE* const repMatch = repBase + repIndex; + if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */ + if (MEM_read32(ip) == MEM_read32(repMatch)) { + /* repcode detected we should take it */ + const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend; + matchLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4; + offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset; /* swap offset history */ + ZSTD_storeSeq(seqStore, 0, anchor, 0, matchLength-MINMATCH); + ip += matchLength; + anchor = ip; + continue; /* faster when present ... (?) 
*/ + } + break; + } } + + /* Save reps for next block */ + rep[0] = offset_1; + rep[1] = offset_2; + + /* Return the last literals size */ + return iend - anchor; +} + + +size_t ZSTD_compressBlock_greedy_extDict( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize) +{ + return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, cParams, src, srcSize, 0, 0); +} + +size_t ZSTD_compressBlock_lazy_extDict( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize) + +{ + return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, cParams, src, srcSize, 0, 1); +} + +size_t ZSTD_compressBlock_lazy2_extDict( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize) + +{ + return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, cParams, src, srcSize, 0, 2); +} + +size_t ZSTD_compressBlock_btlazy2_extDict( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize) + +{ + return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, cParams, src, srcSize, 1, 2); +} diff --git a/vendor/github.com/DataDog/zstd/zstd_lazy.h b/vendor/github.com/DataDog/zstd/zstd_lazy.h new file mode 100644 index 00000000000..bda064f1997 --- /dev/null +++ b/vendor/github.com/DataDog/zstd/zstd_lazy.h @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). 
+ * You may select, at your option, one of the above-listed licenses. + */ + +#ifndef ZSTD_LAZY_H +#define ZSTD_LAZY_H + +#if defined (__cplusplus) +extern "C" { +#endif + +#include "zstd_compress_internal.h" + +U32 ZSTD_insertAndFindFirstIndex( + ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams, + const BYTE* ip); + +void ZSTD_preserveUnsortedMark (U32* const table, U32 const size, U32 const reducerValue); /*! used in ZSTD_reduceIndex(). pre-emptively increase value of ZSTD_DUBT_UNSORTED_MARK */ + +size_t ZSTD_compressBlock_btlazy2( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize); +size_t ZSTD_compressBlock_lazy2( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize); +size_t ZSTD_compressBlock_lazy( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize); +size_t ZSTD_compressBlock_greedy( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize); + +size_t ZSTD_compressBlock_greedy_extDict( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize); +size_t ZSTD_compressBlock_lazy_extDict( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize); +size_t ZSTD_compressBlock_lazy2_extDict( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize); +size_t ZSTD_compressBlock_btlazy2_extDict( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_compressionParameters const* cParams, void const* 
src, size_t srcSize); + +#if defined (__cplusplus) +} +#endif + +#endif /* ZSTD_LAZY_H */ diff --git a/vendor/github.com/DataDog/zstd/zstd_ldm.c b/vendor/github.com/DataDog/zstd/zstd_ldm.c new file mode 100644 index 00000000000..bffd8a3dfaa --- /dev/null +++ b/vendor/github.com/DataDog/zstd/zstd_ldm.c @@ -0,0 +1,653 @@ +/* + * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + */ + +#include "zstd_ldm.h" + +#include "zstd_fast.h" /* ZSTD_fillHashTable() */ +#include "zstd_double_fast.h" /* ZSTD_fillDoubleHashTable() */ + +#define LDM_BUCKET_SIZE_LOG 3 +#define LDM_MIN_MATCH_LENGTH 64 +#define LDM_HASH_RLOG 7 +#define LDM_HASH_CHAR_OFFSET 10 + +void ZSTD_ldm_adjustParameters(ldmParams_t* params, + ZSTD_compressionParameters const* cParams) +{ + U32 const windowLog = cParams->windowLog; + ZSTD_STATIC_ASSERT(LDM_BUCKET_SIZE_LOG <= ZSTD_LDM_BUCKETSIZELOG_MAX); + DEBUGLOG(4, "ZSTD_ldm_adjustParameters"); + if (!params->bucketSizeLog) params->bucketSizeLog = LDM_BUCKET_SIZE_LOG; + if (!params->minMatchLength) params->minMatchLength = LDM_MIN_MATCH_LENGTH; + if (cParams->strategy >= ZSTD_btopt) { + /* Get out of the way of the optimal parser */ + U32 const minMatch = MAX(cParams->targetLength, params->minMatchLength); + assert(minMatch >= ZSTD_LDM_MINMATCH_MIN); + assert(minMatch <= ZSTD_LDM_MINMATCH_MAX); + params->minMatchLength = minMatch; + } + if (params->hashLog == 0) { + params->hashLog = MAX(ZSTD_HASHLOG_MIN, windowLog - LDM_HASH_RLOG); + assert(params->hashLog <= ZSTD_HASHLOG_MAX); + } + if (params->hashEveryLog == 0) { + params->hashEveryLog = + windowLog < params->hashLog ? 
0 : windowLog - params->hashLog; + } + params->bucketSizeLog = MIN(params->bucketSizeLog, params->hashLog); +} + +size_t ZSTD_ldm_getTableSize(ldmParams_t params) +{ + size_t const ldmHSize = ((size_t)1) << params.hashLog; + size_t const ldmBucketSizeLog = MIN(params.bucketSizeLog, params.hashLog); + size_t const ldmBucketSize = + ((size_t)1) << (params.hashLog - ldmBucketSizeLog); + size_t const totalSize = ldmBucketSize + ldmHSize * sizeof(ldmEntry_t); + return params.enableLdm ? totalSize : 0; +} + +size_t ZSTD_ldm_getMaxNbSeq(ldmParams_t params, size_t maxChunkSize) +{ + return params.enableLdm ? (maxChunkSize / params.minMatchLength) : 0; +} + +/** ZSTD_ldm_getSmallHash() : + * numBits should be <= 32 + * If numBits==0, returns 0. + * @return : the most significant numBits of value. */ +static U32 ZSTD_ldm_getSmallHash(U64 value, U32 numBits) +{ + assert(numBits <= 32); + return numBits == 0 ? 0 : (U32)(value >> (64 - numBits)); +} + +/** ZSTD_ldm_getChecksum() : + * numBitsToDiscard should be <= 32 + * @return : the next most significant 32 bits after numBitsToDiscard */ +static U32 ZSTD_ldm_getChecksum(U64 hash, U32 numBitsToDiscard) +{ + assert(numBitsToDiscard <= 32); + return (hash >> (64 - 32 - numBitsToDiscard)) & 0xFFFFFFFF; +} + +/** ZSTD_ldm_getTag() ; + * Given the hash, returns the most significant numTagBits bits + * after (32 + hbits) bits. + * + * If there are not enough bits remaining, return the last + * numTagBits bits. */ +static U32 ZSTD_ldm_getTag(U64 hash, U32 hbits, U32 numTagBits) +{ + assert(numTagBits < 32 && hbits <= 32); + if (32 - hbits < numTagBits) { + return hash & (((U32)1 << numTagBits) - 1); + } else { + return (hash >> (32 - hbits - numTagBits)) & (((U32)1 << numTagBits) - 1); + } +} + +/** ZSTD_ldm_getBucket() : + * Returns a pointer to the start of the bucket associated with hash. 
*/ +static ldmEntry_t* ZSTD_ldm_getBucket( + ldmState_t* ldmState, size_t hash, ldmParams_t const ldmParams) +{ + return ldmState->hashTable + (hash << ldmParams.bucketSizeLog); +} + +/** ZSTD_ldm_insertEntry() : + * Insert the entry with corresponding hash into the hash table */ +static void ZSTD_ldm_insertEntry(ldmState_t* ldmState, + size_t const hash, const ldmEntry_t entry, + ldmParams_t const ldmParams) +{ + BYTE* const bucketOffsets = ldmState->bucketOffsets; + *(ZSTD_ldm_getBucket(ldmState, hash, ldmParams) + bucketOffsets[hash]) = entry; + bucketOffsets[hash]++; + bucketOffsets[hash] &= ((U32)1 << ldmParams.bucketSizeLog) - 1; +} + +/** ZSTD_ldm_makeEntryAndInsertByTag() : + * + * Gets the small hash, checksum, and tag from the rollingHash. + * + * If the tag matches (1 << ldmParams.hashEveryLog)-1, then + * creates an ldmEntry from the offset, and inserts it into the hash table. + * + * hBits is the length of the small hash, which is the most significant hBits + * of rollingHash. The checksum is the next 32 most significant bits, followed + * by ldmParams.hashEveryLog bits that make up the tag. */ +static void ZSTD_ldm_makeEntryAndInsertByTag(ldmState_t* ldmState, + U64 const rollingHash, + U32 const hBits, + U32 const offset, + ldmParams_t const ldmParams) +{ + U32 const tag = ZSTD_ldm_getTag(rollingHash, hBits, ldmParams.hashEveryLog); + U32 const tagMask = ((U32)1 << ldmParams.hashEveryLog) - 1; + if (tag == tagMask) { + U32 const hash = ZSTD_ldm_getSmallHash(rollingHash, hBits); + U32 const checksum = ZSTD_ldm_getChecksum(rollingHash, hBits); + ldmEntry_t entry; + entry.offset = offset; + entry.checksum = checksum; + ZSTD_ldm_insertEntry(ldmState, hash, entry, ldmParams); + } +} + +/** ZSTD_ldm_getRollingHash() : + * Get a 64-bit hash using the first len bytes from buf. + * + * Giving bytes s = s_1, s_2, ... s_k, the hash is defined to be + * H(s) = s_1*(a^(k-1)) + s_2*(a^(k-2)) + ... 
+ s_k*(a^0) + * + * where the constant a is defined to be prime8bytes. + * + * The implementation adds an offset to each byte, so + * H(s) = (s_1 + HASH_CHAR_OFFSET)*(a^(k-1)) + ... */ +static U64 ZSTD_ldm_getRollingHash(const BYTE* buf, U32 len) +{ + U64 ret = 0; + U32 i; + for (i = 0; i < len; i++) { + ret *= prime8bytes; + ret += buf[i] + LDM_HASH_CHAR_OFFSET; + } + return ret; +} + +/** ZSTD_ldm_ipow() : + * Return base^exp. */ +static U64 ZSTD_ldm_ipow(U64 base, U64 exp) +{ + U64 ret = 1; + while (exp) { + if (exp & 1) { ret *= base; } + exp >>= 1; + base *= base; + } + return ret; +} + +U64 ZSTD_ldm_getHashPower(U32 minMatchLength) { + DEBUGLOG(4, "ZSTD_ldm_getHashPower: mml=%u", minMatchLength); + assert(minMatchLength >= ZSTD_LDM_MINMATCH_MIN); + return ZSTD_ldm_ipow(prime8bytes, minMatchLength - 1); +} + +/** ZSTD_ldm_updateHash() : + * Updates hash by removing toRemove and adding toAdd. */ +static U64 ZSTD_ldm_updateHash(U64 hash, BYTE toRemove, BYTE toAdd, U64 hashPower) +{ + hash -= ((toRemove + LDM_HASH_CHAR_OFFSET) * hashPower); + hash *= prime8bytes; + hash += toAdd + LDM_HASH_CHAR_OFFSET; + return hash; +} + +/** ZSTD_ldm_countBackwardsMatch() : + * Returns the number of bytes that match backwards before pIn and pMatch. + * + * We count only bytes where pMatch >= pBase and pIn >= pAnchor. */ +static size_t ZSTD_ldm_countBackwardsMatch( + const BYTE* pIn, const BYTE* pAnchor, + const BYTE* pMatch, const BYTE* pBase) +{ + size_t matchLength = 0; + while (pIn > pAnchor && pMatch > pBase && pIn[-1] == pMatch[-1]) { + pIn--; + pMatch--; + matchLength++; + } + return matchLength; +} + +/** ZSTD_ldm_fillFastTables() : + * + * Fills the relevant tables for the ZSTD_fast and ZSTD_dfast strategies. + * This is similar to ZSTD_loadDictionaryContent. + * + * The tables for the other strategies are filled within their + * block compressors. 
*/ +static size_t ZSTD_ldm_fillFastTables(ZSTD_matchState_t* ms, + ZSTD_compressionParameters const* cParams, + void const* end) +{ + const BYTE* const iend = (const BYTE*)end; + + switch(cParams->strategy) + { + case ZSTD_fast: + ZSTD_fillHashTable(ms, cParams, iend); + ms->nextToUpdate = (U32)(iend - ms->window.base); + break; + + case ZSTD_dfast: + ZSTD_fillDoubleHashTable(ms, cParams, iend); + ms->nextToUpdate = (U32)(iend - ms->window.base); + break; + + case ZSTD_greedy: + case ZSTD_lazy: + case ZSTD_lazy2: + case ZSTD_btlazy2: + case ZSTD_btopt: + case ZSTD_btultra: + break; + default: + assert(0); /* not possible : not a valid strategy id */ + } + + return 0; +} + +/** ZSTD_ldm_fillLdmHashTable() : + * + * Fills hashTable from (lastHashed + 1) to iend (non-inclusive). + * lastHash is the rolling hash that corresponds to lastHashed. + * + * Returns the rolling hash corresponding to position iend-1. */ +static U64 ZSTD_ldm_fillLdmHashTable(ldmState_t* state, + U64 lastHash, const BYTE* lastHashed, + const BYTE* iend, const BYTE* base, + U32 hBits, ldmParams_t const ldmParams) +{ + U64 rollingHash = lastHash; + const BYTE* cur = lastHashed + 1; + + while (cur < iend) { + rollingHash = ZSTD_ldm_updateHash(rollingHash, cur[-1], + cur[ldmParams.minMatchLength-1], + state->hashPower); + ZSTD_ldm_makeEntryAndInsertByTag(state, + rollingHash, hBits, + (U32)(cur - base), ldmParams); + ++cur; + } + return rollingHash; +} + + +/** ZSTD_ldm_limitTableUpdate() : + * + * Sets cctx->nextToUpdate to a position corresponding closer to anchor + * if it is far way + * (after a long match, only update tables a limited amount). 
*/ +static void ZSTD_ldm_limitTableUpdate(ZSTD_matchState_t* ms, const BYTE* anchor) +{ + U32 const current = (U32)(anchor - ms->window.base); + if (current > ms->nextToUpdate + 1024) { + ms->nextToUpdate = + current - MIN(512, current - ms->nextToUpdate - 1024); + } +} + +static size_t ZSTD_ldm_generateSequences_internal( + ldmState_t* ldmState, rawSeqStore_t* rawSeqStore, + ldmParams_t const* params, void const* src, size_t srcSize) +{ + /* LDM parameters */ + int const extDict = ZSTD_window_hasExtDict(ldmState->window); + U32 const minMatchLength = params->minMatchLength; + U64 const hashPower = ldmState->hashPower; + U32 const hBits = params->hashLog - params->bucketSizeLog; + U32 const ldmBucketSize = 1U << params->bucketSizeLog; + U32 const hashEveryLog = params->hashEveryLog; + U32 const ldmTagMask = (1U << params->hashEveryLog) - 1; + /* Prefix and extDict parameters */ + U32 const dictLimit = ldmState->window.dictLimit; + U32 const lowestIndex = extDict ? ldmState->window.lowLimit : dictLimit; + BYTE const* const base = ldmState->window.base; + BYTE const* const dictBase = extDict ? ldmState->window.dictBase : NULL; + BYTE const* const dictStart = extDict ? dictBase + lowestIndex : NULL; + BYTE const* const dictEnd = extDict ? 
dictBase + dictLimit : NULL; + BYTE const* const lowPrefixPtr = base + dictLimit; + /* Input bounds */ + BYTE const* const istart = (BYTE const*)src; + BYTE const* const iend = istart + srcSize; + BYTE const* const ilimit = iend - MAX(minMatchLength, HASH_READ_SIZE); + /* Input positions */ + BYTE const* anchor = istart; + BYTE const* ip = istart; + /* Rolling hash */ + BYTE const* lastHashed = NULL; + U64 rollingHash = 0; + + while (ip <= ilimit) { + size_t mLength; + U32 const current = (U32)(ip - base); + size_t forwardMatchLength = 0, backwardMatchLength = 0; + ldmEntry_t* bestEntry = NULL; + if (ip != istart) { + rollingHash = ZSTD_ldm_updateHash(rollingHash, lastHashed[0], + lastHashed[minMatchLength], + hashPower); + } else { + rollingHash = ZSTD_ldm_getRollingHash(ip, minMatchLength); + } + lastHashed = ip; + + /* Do not insert and do not look for a match */ + if (ZSTD_ldm_getTag(rollingHash, hBits, hashEveryLog) != ldmTagMask) { + ip++; + continue; + } + + /* Get the best entry and compute the match lengths */ + { + ldmEntry_t* const bucket = + ZSTD_ldm_getBucket(ldmState, + ZSTD_ldm_getSmallHash(rollingHash, hBits), + *params); + ldmEntry_t* cur; + size_t bestMatchLength = 0; + U32 const checksum = ZSTD_ldm_getChecksum(rollingHash, hBits); + + for (cur = bucket; cur < bucket + ldmBucketSize; ++cur) { + size_t curForwardMatchLength, curBackwardMatchLength, + curTotalMatchLength; + if (cur->checksum != checksum || cur->offset <= lowestIndex) { + continue; + } + if (extDict) { + BYTE const* const curMatchBase = + cur->offset < dictLimit ? dictBase : base; + BYTE const* const pMatch = curMatchBase + cur->offset; + BYTE const* const matchEnd = + cur->offset < dictLimit ? dictEnd : iend; + BYTE const* const lowMatchPtr = + cur->offset < dictLimit ? 
dictStart : lowPrefixPtr; + + curForwardMatchLength = ZSTD_count_2segments( + ip, pMatch, iend, + matchEnd, lowPrefixPtr); + if (curForwardMatchLength < minMatchLength) { + continue; + } + curBackwardMatchLength = + ZSTD_ldm_countBackwardsMatch(ip, anchor, pMatch, + lowMatchPtr); + curTotalMatchLength = curForwardMatchLength + + curBackwardMatchLength; + } else { /* !extDict */ + BYTE const* const pMatch = base + cur->offset; + curForwardMatchLength = ZSTD_count(ip, pMatch, iend); + if (curForwardMatchLength < minMatchLength) { + continue; + } + curBackwardMatchLength = + ZSTD_ldm_countBackwardsMatch(ip, anchor, pMatch, + lowPrefixPtr); + curTotalMatchLength = curForwardMatchLength + + curBackwardMatchLength; + } + + if (curTotalMatchLength > bestMatchLength) { + bestMatchLength = curTotalMatchLength; + forwardMatchLength = curForwardMatchLength; + backwardMatchLength = curBackwardMatchLength; + bestEntry = cur; + } + } + } + + /* No match found -- continue searching */ + if (bestEntry == NULL) { + ZSTD_ldm_makeEntryAndInsertByTag(ldmState, rollingHash, + hBits, current, + *params); + ip++; + continue; + } + + /* Match found */ + mLength = forwardMatchLength + backwardMatchLength; + ip -= backwardMatchLength; + + { + /* Store the sequence: + * ip = current - backwardMatchLength + * The match is at (bestEntry->offset - backwardMatchLength) + */ + U32 const matchIndex = bestEntry->offset; + U32 const offset = current - matchIndex; + rawSeq* const seq = rawSeqStore->seq + rawSeqStore->size; + + /* Out of sequence storage */ + if (rawSeqStore->size == rawSeqStore->capacity) + return ERROR(dstSize_tooSmall); + seq->litLength = (U32)(ip - anchor); + seq->matchLength = (U32)mLength; + seq->offset = offset; + rawSeqStore->size++; + } + + /* Insert the current entry into the hash table */ + ZSTD_ldm_makeEntryAndInsertByTag(ldmState, rollingHash, hBits, + (U32)(lastHashed - base), + *params); + + assert(ip + backwardMatchLength == lastHashed); + + /* Fill the hash table from 
lastHashed+1 to ip+mLength*/ + /* Heuristic: don't need to fill the entire table at end of block */ + if (ip + mLength <= ilimit) { + rollingHash = ZSTD_ldm_fillLdmHashTable( + ldmState, rollingHash, lastHashed, + ip + mLength, base, hBits, *params); + lastHashed = ip + mLength - 1; + } + ip += mLength; + anchor = ip; + } + return iend - anchor; +} + +/*! ZSTD_ldm_reduceTable() : + * reduce table indexes by `reducerValue` */ +static void ZSTD_ldm_reduceTable(ldmEntry_t* const table, U32 const size, + U32 const reducerValue) +{ + U32 u; + for (u = 0; u < size; u++) { + if (table[u].offset < reducerValue) table[u].offset = 0; + else table[u].offset -= reducerValue; + } +} + +size_t ZSTD_ldm_generateSequences( + ldmState_t* ldmState, rawSeqStore_t* sequences, + ldmParams_t const* params, void const* src, size_t srcSize) +{ + U32 const maxDist = 1U << params->windowLog; + BYTE const* const istart = (BYTE const*)src; + BYTE const* const iend = istart + srcSize; + size_t const kMaxChunkSize = 1 << 20; + size_t const nbChunks = (srcSize / kMaxChunkSize) + ((srcSize % kMaxChunkSize) != 0); + size_t chunk; + size_t leftoverSize = 0; + + assert(ZSTD_CHUNKSIZE_MAX >= kMaxChunkSize); + /* Check that ZSTD_window_update() has been called for this chunk prior + * to passing it to this function. + */ + assert(ldmState->window.nextSrc >= (BYTE const*)src + srcSize); + /* The input could be very large (in zstdmt), so it must be broken up into + * chunks to enforce the maximmum distance and handle overflow correction. + */ + assert(sequences->pos <= sequences->size); + assert(sequences->size <= sequences->capacity); + for (chunk = 0; chunk < nbChunks && sequences->size < sequences->capacity; ++chunk) { + BYTE const* const chunkStart = istart + chunk * kMaxChunkSize; + size_t const remaining = (size_t)(iend - chunkStart); + BYTE const *const chunkEnd = + (remaining < kMaxChunkSize) ? 
iend : chunkStart + kMaxChunkSize; + size_t const chunkSize = chunkEnd - chunkStart; + size_t newLeftoverSize; + size_t const prevSize = sequences->size; + + assert(chunkStart < iend); + /* 1. Perform overflow correction if necessary. */ + if (ZSTD_window_needOverflowCorrection(ldmState->window, chunkEnd)) { + U32 const ldmHSize = 1U << params->hashLog; + U32 const correction = ZSTD_window_correctOverflow( + &ldmState->window, /* cycleLog */ 0, maxDist, src); + ZSTD_ldm_reduceTable(ldmState->hashTable, ldmHSize, correction); + } + /* 2. We enforce the maximum offset allowed. + * + * kMaxChunkSize should be small enough that we don't lose too much of + * the window through early invalidation. + * TODO: * Test the chunk size. + * * Try invalidation after the sequence generation and test the + * the offset against maxDist directly. + */ + ZSTD_window_enforceMaxDist(&ldmState->window, chunkEnd, maxDist, NULL); + /* 3. Generate the sequences for the chunk, and get newLeftoverSize. */ + newLeftoverSize = ZSTD_ldm_generateSequences_internal( + ldmState, sequences, params, chunkStart, chunkSize); + if (ZSTD_isError(newLeftoverSize)) + return newLeftoverSize; + /* 4. We add the leftover literals from previous iterations to the first + * newly generated sequence, or add the `newLeftoverSize` if none are + * generated. 
+ */ + /* Prepend the leftover literals from the last call */ + if (prevSize < sequences->size) { + sequences->seq[prevSize].litLength += (U32)leftoverSize; + leftoverSize = newLeftoverSize; + } else { + assert(newLeftoverSize == chunkSize); + leftoverSize += chunkSize; + } + } + return 0; +} + +void ZSTD_ldm_skipSequences(rawSeqStore_t* rawSeqStore, size_t srcSize, U32 const minMatch) { + while (srcSize > 0 && rawSeqStore->pos < rawSeqStore->size) { + rawSeq* seq = rawSeqStore->seq + rawSeqStore->pos; + if (srcSize <= seq->litLength) { + /* Skip past srcSize literals */ + seq->litLength -= (U32)srcSize; + return; + } + srcSize -= seq->litLength; + seq->litLength = 0; + if (srcSize < seq->matchLength) { + /* Skip past the first srcSize of the match */ + seq->matchLength -= (U32)srcSize; + if (seq->matchLength < minMatch) { + /* The match is too short, omit it */ + if (rawSeqStore->pos + 1 < rawSeqStore->size) { + seq[1].litLength += seq[0].matchLength; + } + rawSeqStore->pos++; + } + return; + } + srcSize -= seq->matchLength; + seq->matchLength = 0; + rawSeqStore->pos++; + } +} + +/** + * If the sequence length is longer than remaining then the sequence is split + * between this block and the next. + * + * Returns the current sequence to handle, or if the rest of the block should + * be literals, it returns a sequence with offset == 0. + */ +static rawSeq maybeSplitSequence(rawSeqStore_t* rawSeqStore, + U32 const remaining, U32 const minMatch) +{ + rawSeq sequence = rawSeqStore->seq[rawSeqStore->pos]; + assert(sequence.offset > 0); + /* Likely: No partial sequence */ + if (remaining >= sequence.litLength + sequence.matchLength) { + rawSeqStore->pos++; + return sequence; + } + /* Cut the sequence short (offset == 0 ==> rest is literals). 
*/ + if (remaining <= sequence.litLength) { + sequence.offset = 0; + } else if (remaining < sequence.litLength + sequence.matchLength) { + sequence.matchLength = remaining - sequence.litLength; + if (sequence.matchLength < minMatch) { + sequence.offset = 0; + } + } + /* Skip past `remaining` bytes for the future sequences. */ + ZSTD_ldm_skipSequences(rawSeqStore, remaining, minMatch); + return sequence; +} + +size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore, + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize, + int const extDict) +{ + unsigned const minMatch = cParams->searchLength; + ZSTD_blockCompressor const blockCompressor = + ZSTD_selectBlockCompressor(cParams->strategy, extDict); + BYTE const* const base = ms->window.base; + /* Input bounds */ + BYTE const* const istart = (BYTE const*)src; + BYTE const* const iend = istart + srcSize; + /* Input positions */ + BYTE const* ip = istart; + + assert(rawSeqStore->pos <= rawSeqStore->size); + assert(rawSeqStore->size <= rawSeqStore->capacity); + /* Loop through each sequence and apply the block compressor to the lits */ + while (rawSeqStore->pos < rawSeqStore->size && ip < iend) { + /* maybeSplitSequence updates rawSeqStore->pos */ + rawSeq const sequence = maybeSplitSequence(rawSeqStore, + (U32)(iend - ip), minMatch); + int i; + /* End signal */ + if (sequence.offset == 0) + break; + + assert(sequence.offset <= (1U << cParams->windowLog)); + assert(ip + sequence.litLength + sequence.matchLength <= iend); + + /* Fill tables for block compressor */ + ZSTD_ldm_limitTableUpdate(ms, ip); + ZSTD_ldm_fillFastTables(ms, cParams, ip); + /* Run the block compressor */ + { + size_t const newLitLength = + blockCompressor(ms, seqStore, rep, cParams, ip, + sequence.litLength); + ip += sequence.litLength; + ms->nextToUpdate = (U32)(ip - base); + /* Update the repcodes */ + for (i = ZSTD_REP_NUM - 1; i > 0; i--) + rep[i] = 
rep[i-1]; + rep[0] = sequence.offset; + /* Store the sequence */ + ZSTD_storeSeq(seqStore, newLitLength, ip - newLitLength, + sequence.offset + ZSTD_REP_MOVE, + sequence.matchLength - MINMATCH); + ip += sequence.matchLength; + } + } + /* Fill the tables for the block compressor */ + ZSTD_ldm_limitTableUpdate(ms, ip); + ZSTD_ldm_fillFastTables(ms, cParams, ip); + /* Compress the last literals */ + { + size_t const lastLiterals = blockCompressor(ms, seqStore, rep, cParams, + ip, iend - ip); + ms->nextToUpdate = (U32)(iend - base); + return lastLiterals; + } +} diff --git a/vendor/github.com/DataDog/zstd/zstd_ldm.h b/vendor/github.com/DataDog/zstd/zstd_ldm.h new file mode 100644 index 00000000000..0c3789ff137 --- /dev/null +++ b/vendor/github.com/DataDog/zstd/zstd_ldm.h @@ -0,0 +1,111 @@ +/* + * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + */ + +#ifndef ZSTD_LDM_H +#define ZSTD_LDM_H + +#if defined (__cplusplus) +extern "C" { +#endif + +#include "zstd_compress_internal.h" /* ldmParams_t, U32 */ +#include "zstd.h" /* ZSTD_CCtx, size_t */ + +/*-************************************* +* Long distance matching +***************************************/ + +#define ZSTD_LDM_DEFAULT_WINDOW_LOG ZSTD_WINDOWLOG_DEFAULTMAX + +/** + * ZSTD_ldm_generateSequences(): + * + * Generates the sequences using the long distance match finder. + * Generates long range matching sequences in `sequences`, which parse a prefix + * of the source. `sequences` must be large enough to store every sequence, + * which can be checked with `ZSTD_ldm_getMaxNbSeq()`. + * @returns 0 or an error code. 
+ * + * NOTE: The user must have called ZSTD_window_update() for all of the input + * they have, even if they pass it to ZSTD_ldm_generateSequences() in chunks. + * NOTE: This function returns an error if it runs out of space to store + * sequences. + */ +size_t ZSTD_ldm_generateSequences( + ldmState_t* ldms, rawSeqStore_t* sequences, + ldmParams_t const* params, void const* src, size_t srcSize); + +/** + * ZSTD_ldm_blockCompress(): + * + * Compresses a block using the predefined sequences, along with a secondary + * block compressor. The literals section of every sequence is passed to the + * secondary block compressor, and those sequences are interspersed with the + * predefined sequences. Returns the length of the last literals. + * Updates `rawSeqStore.pos` to indicate how many sequences have been consumed. + * `rawSeqStore.seq` may also be updated to split the last sequence between two + * blocks. + * @return The length of the last literals. + * + * NOTE: The source must be at most the maximum block size, but the predefined + * sequences can be any size, and may be longer than the block. In the case that + * they are longer than the block, the last sequences may need to be split into + * two. We handle that case correctly, and update `rawSeqStore` appropriately. + * NOTE: This function does not return any errors. + */ +size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore, + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_compressionParameters const* cParams, + void const* src, size_t srcSize, + int const extDict); + +/** + * ZSTD_ldm_skipSequences(): + * + * Skip past `srcSize` bytes worth of sequences in `rawSeqStore`. + * Avoids emitting matches less than `minMatch` bytes. + * Must be called for data with is not passed to ZSTD_ldm_blockCompress(). 
+ */ +void ZSTD_ldm_skipSequences(rawSeqStore_t* rawSeqStore, size_t srcSize, + U32 const minMatch); + + +/** ZSTD_ldm_getTableSize() : + * Estimate the space needed for long distance matching tables or 0 if LDM is + * disabled. + */ +size_t ZSTD_ldm_getTableSize(ldmParams_t params); + +/** ZSTD_ldm_getSeqSpace() : + * Return an upper bound on the number of sequences that can be produced by + * the long distance matcher, or 0 if LDM is disabled. + */ +size_t ZSTD_ldm_getMaxNbSeq(ldmParams_t params, size_t maxChunkSize); + +/** ZSTD_ldm_getTableSize() : + * Return prime8bytes^(minMatchLength-1) */ +U64 ZSTD_ldm_getHashPower(U32 minMatchLength); + +/** ZSTD_ldm_adjustParameters() : + * If the params->hashEveryLog is not set, set it to its default value based on + * windowLog and params->hashLog. + * + * Ensures that params->bucketSizeLog is <= params->hashLog (setting it to + * params->hashLog if it is not). + * + * Ensures that the minMatchLength >= targetLength during optimal parsing. + */ +void ZSTD_ldm_adjustParameters(ldmParams_t* params, + ZSTD_compressionParameters const* cParams); + +#if defined (__cplusplus) +} +#endif + +#endif /* ZSTD_FAST_H */ diff --git a/vendor/github.com/DataDog/zstd/zstd_legacy.h b/vendor/github.com/DataDog/zstd/zstd_legacy.h new file mode 100644 index 00000000000..5893cb9657e --- /dev/null +++ b/vendor/github.com/DataDog/zstd/zstd_legacy.h @@ -0,0 +1,381 @@ +/* + * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. 
+ */ + +#ifndef ZSTD_LEGACY_H +#define ZSTD_LEGACY_H + +#if defined (__cplusplus) +extern "C" { +#endif + +/* ************************************* +* Includes +***************************************/ +#include "mem.h" /* MEM_STATIC */ +#include "error_private.h" /* ERROR */ +#include "zstd.h" /* ZSTD_inBuffer, ZSTD_outBuffer */ + +#if !defined (ZSTD_LEGACY_SUPPORT) || (ZSTD_LEGACY_SUPPORT == 0) +# undef ZSTD_LEGACY_SUPPORT +# define ZSTD_LEGACY_SUPPORT 8 +#endif + +#if (ZSTD_LEGACY_SUPPORT <= 1) +# include "zstd_v01.h" +#endif +#if (ZSTD_LEGACY_SUPPORT <= 2) +# include "zstd_v02.h" +#endif +#if (ZSTD_LEGACY_SUPPORT <= 3) +# include "zstd_v03.h" +#endif +#if (ZSTD_LEGACY_SUPPORT <= 4) +# include "zstd_v04.h" +#endif +#if (ZSTD_LEGACY_SUPPORT <= 5) +# include "zstd_v05.h" +#endif +#if (ZSTD_LEGACY_SUPPORT <= 6) +# include "zstd_v06.h" +#endif +#if (ZSTD_LEGACY_SUPPORT <= 7) +# include "zstd_v07.h" +#endif + +/** ZSTD_isLegacy() : + @return : > 0 if supported by legacy decoder. 0 otherwise. + return value is the version. 
+*/ +MEM_STATIC unsigned ZSTD_isLegacy(const void* src, size_t srcSize) +{ + U32 magicNumberLE; + if (srcSize<4) return 0; + magicNumberLE = MEM_readLE32(src); + switch(magicNumberLE) + { +#if (ZSTD_LEGACY_SUPPORT <= 1) + case ZSTDv01_magicNumberLE:return 1; +#endif +#if (ZSTD_LEGACY_SUPPORT <= 2) + case ZSTDv02_magicNumber : return 2; +#endif +#if (ZSTD_LEGACY_SUPPORT <= 3) + case ZSTDv03_magicNumber : return 3; +#endif +#if (ZSTD_LEGACY_SUPPORT <= 4) + case ZSTDv04_magicNumber : return 4; +#endif +#if (ZSTD_LEGACY_SUPPORT <= 5) + case ZSTDv05_MAGICNUMBER : return 5; +#endif +#if (ZSTD_LEGACY_SUPPORT <= 6) + case ZSTDv06_MAGICNUMBER : return 6; +#endif +#if (ZSTD_LEGACY_SUPPORT <= 7) + case ZSTDv07_MAGICNUMBER : return 7; +#endif + default : return 0; + } +} + + +MEM_STATIC unsigned long long ZSTD_getDecompressedSize_legacy(const void* src, size_t srcSize) +{ + U32 const version = ZSTD_isLegacy(src, srcSize); + if (version < 5) return 0; /* no decompressed size in frame header, or not a legacy format */ +#if (ZSTD_LEGACY_SUPPORT <= 5) + if (version==5) { + ZSTDv05_parameters fParams; + size_t const frResult = ZSTDv05_getFrameParams(&fParams, src, srcSize); + if (frResult != 0) return 0; + return fParams.srcSize; + } +#endif +#if (ZSTD_LEGACY_SUPPORT <= 6) + if (version==6) { + ZSTDv06_frameParams fParams; + size_t const frResult = ZSTDv06_getFrameParams(&fParams, src, srcSize); + if (frResult != 0) return 0; + return fParams.frameContentSize; + } +#endif +#if (ZSTD_LEGACY_SUPPORT <= 7) + if (version==7) { + ZSTDv07_frameParams fParams; + size_t const frResult = ZSTDv07_getFrameParams(&fParams, src, srcSize); + if (frResult != 0) return 0; + return fParams.frameContentSize; + } +#endif + return 0; /* should not be possible */ +} + + +MEM_STATIC size_t ZSTD_decompressLegacy( + void* dst, size_t dstCapacity, + const void* src, size_t compressedSize, + const void* dict,size_t dictSize) +{ + U32 const version = ZSTD_isLegacy(src, compressedSize); + (void)dst; 
(void)dstCapacity; (void)dict; (void)dictSize; /* unused when ZSTD_LEGACY_SUPPORT >= 8 */ + switch(version) + { +#if (ZSTD_LEGACY_SUPPORT <= 1) + case 1 : + return ZSTDv01_decompress(dst, dstCapacity, src, compressedSize); +#endif +#if (ZSTD_LEGACY_SUPPORT <= 2) + case 2 : + return ZSTDv02_decompress(dst, dstCapacity, src, compressedSize); +#endif +#if (ZSTD_LEGACY_SUPPORT <= 3) + case 3 : + return ZSTDv03_decompress(dst, dstCapacity, src, compressedSize); +#endif +#if (ZSTD_LEGACY_SUPPORT <= 4) + case 4 : + return ZSTDv04_decompress(dst, dstCapacity, src, compressedSize); +#endif +#if (ZSTD_LEGACY_SUPPORT <= 5) + case 5 : + { size_t result; + ZSTDv05_DCtx* const zd = ZSTDv05_createDCtx(); + if (zd==NULL) return ERROR(memory_allocation); + result = ZSTDv05_decompress_usingDict(zd, dst, dstCapacity, src, compressedSize, dict, dictSize); + ZSTDv05_freeDCtx(zd); + return result; + } +#endif +#if (ZSTD_LEGACY_SUPPORT <= 6) + case 6 : + { size_t result; + ZSTDv06_DCtx* const zd = ZSTDv06_createDCtx(); + if (zd==NULL) return ERROR(memory_allocation); + result = ZSTDv06_decompress_usingDict(zd, dst, dstCapacity, src, compressedSize, dict, dictSize); + ZSTDv06_freeDCtx(zd); + return result; + } +#endif +#if (ZSTD_LEGACY_SUPPORT <= 7) + case 7 : + { size_t result; + ZSTDv07_DCtx* const zd = ZSTDv07_createDCtx(); + if (zd==NULL) return ERROR(memory_allocation); + result = ZSTDv07_decompress_usingDict(zd, dst, dstCapacity, src, compressedSize, dict, dictSize); + ZSTDv07_freeDCtx(zd); + return result; + } +#endif + default : + return ERROR(prefix_unknown); + } +} + +MEM_STATIC size_t ZSTD_findFrameCompressedSizeLegacy(const void *src, + size_t compressedSize) +{ + U32 const version = ZSTD_isLegacy(src, compressedSize); + switch(version) + { +#if (ZSTD_LEGACY_SUPPORT <= 1) + case 1 : + return ZSTDv01_findFrameCompressedSize(src, compressedSize); +#endif +#if (ZSTD_LEGACY_SUPPORT <= 2) + case 2 : + return ZSTDv02_findFrameCompressedSize(src, compressedSize); +#endif +#if 
(ZSTD_LEGACY_SUPPORT <= 3) + case 3 : + return ZSTDv03_findFrameCompressedSize(src, compressedSize); +#endif +#if (ZSTD_LEGACY_SUPPORT <= 4) + case 4 : + return ZSTDv04_findFrameCompressedSize(src, compressedSize); +#endif +#if (ZSTD_LEGACY_SUPPORT <= 5) + case 5 : + return ZSTDv05_findFrameCompressedSize(src, compressedSize); +#endif +#if (ZSTD_LEGACY_SUPPORT <= 6) + case 6 : + return ZSTDv06_findFrameCompressedSize(src, compressedSize); +#endif +#if (ZSTD_LEGACY_SUPPORT <= 7) + case 7 : + return ZSTDv07_findFrameCompressedSize(src, compressedSize); +#endif + default : + return ERROR(prefix_unknown); + } +} + +MEM_STATIC size_t ZSTD_freeLegacyStreamContext(void* legacyContext, U32 version) +{ + switch(version) + { + default : + case 1 : + case 2 : + case 3 : + (void)legacyContext; + return ERROR(version_unsupported); +#if (ZSTD_LEGACY_SUPPORT <= 4) + case 4 : return ZBUFFv04_freeDCtx((ZBUFFv04_DCtx*)legacyContext); +#endif +#if (ZSTD_LEGACY_SUPPORT <= 5) + case 5 : return ZBUFFv05_freeDCtx((ZBUFFv05_DCtx*)legacyContext); +#endif +#if (ZSTD_LEGACY_SUPPORT <= 6) + case 6 : return ZBUFFv06_freeDCtx((ZBUFFv06_DCtx*)legacyContext); +#endif +#if (ZSTD_LEGACY_SUPPORT <= 7) + case 7 : return ZBUFFv07_freeDCtx((ZBUFFv07_DCtx*)legacyContext); +#endif + } +} + + +MEM_STATIC size_t ZSTD_initLegacyStream(void** legacyContext, U32 prevVersion, U32 newVersion, + const void* dict, size_t dictSize) +{ + DEBUGLOG(5, "ZSTD_initLegacyStream for v0.%u", newVersion); + if (prevVersion != newVersion) ZSTD_freeLegacyStreamContext(*legacyContext, prevVersion); + switch(newVersion) + { + default : + case 1 : + case 2 : + case 3 : + (void)dict; (void)dictSize; + return 0; +#if (ZSTD_LEGACY_SUPPORT <= 4) + case 4 : + { + ZBUFFv04_DCtx* dctx = (prevVersion != newVersion) ? 
ZBUFFv04_createDCtx() : (ZBUFFv04_DCtx*)*legacyContext; + if (dctx==NULL) return ERROR(memory_allocation); + ZBUFFv04_decompressInit(dctx); + ZBUFFv04_decompressWithDictionary(dctx, dict, dictSize); + *legacyContext = dctx; + return 0; + } +#endif +#if (ZSTD_LEGACY_SUPPORT <= 5) + case 5 : + { + ZBUFFv05_DCtx* dctx = (prevVersion != newVersion) ? ZBUFFv05_createDCtx() : (ZBUFFv05_DCtx*)*legacyContext; + if (dctx==NULL) return ERROR(memory_allocation); + ZBUFFv05_decompressInitDictionary(dctx, dict, dictSize); + *legacyContext = dctx; + return 0; + } +#endif +#if (ZSTD_LEGACY_SUPPORT <= 6) + case 6 : + { + ZBUFFv06_DCtx* dctx = (prevVersion != newVersion) ? ZBUFFv06_createDCtx() : (ZBUFFv06_DCtx*)*legacyContext; + if (dctx==NULL) return ERROR(memory_allocation); + ZBUFFv06_decompressInitDictionary(dctx, dict, dictSize); + *legacyContext = dctx; + return 0; + } +#endif +#if (ZSTD_LEGACY_SUPPORT <= 7) + case 7 : + { + ZBUFFv07_DCtx* dctx = (prevVersion != newVersion) ? ZBUFFv07_createDCtx() : (ZBUFFv07_DCtx*)*legacyContext; + if (dctx==NULL) return ERROR(memory_allocation); + ZBUFFv07_decompressInitDictionary(dctx, dict, dictSize); + *legacyContext = dctx; + return 0; + } +#endif + } +} + + + +MEM_STATIC size_t ZSTD_decompressLegacyStream(void* legacyContext, U32 version, + ZSTD_outBuffer* output, ZSTD_inBuffer* input) +{ + DEBUGLOG(5, "ZSTD_decompressLegacyStream for v0.%u", version); + switch(version) + { + default : + case 1 : + case 2 : + case 3 : + (void)legacyContext; (void)output; (void)input; + return ERROR(version_unsupported); +#if (ZSTD_LEGACY_SUPPORT <= 4) + case 4 : + { + ZBUFFv04_DCtx* dctx = (ZBUFFv04_DCtx*) legacyContext; + const void* src = (const char*)input->src + input->pos; + size_t readSize = input->size - input->pos; + void* dst = (char*)output->dst + output->pos; + size_t decodedSize = output->size - output->pos; + size_t const hintSize = ZBUFFv04_decompressContinue(dctx, dst, &decodedSize, src, &readSize); + output->pos += decodedSize; + 
input->pos += readSize; + return hintSize; + } +#endif +#if (ZSTD_LEGACY_SUPPORT <= 5) + case 5 : + { + ZBUFFv05_DCtx* dctx = (ZBUFFv05_DCtx*) legacyContext; + const void* src = (const char*)input->src + input->pos; + size_t readSize = input->size - input->pos; + void* dst = (char*)output->dst + output->pos; + size_t decodedSize = output->size - output->pos; + size_t const hintSize = ZBUFFv05_decompressContinue(dctx, dst, &decodedSize, src, &readSize); + output->pos += decodedSize; + input->pos += readSize; + return hintSize; + } +#endif +#if (ZSTD_LEGACY_SUPPORT <= 6) + case 6 : + { + ZBUFFv06_DCtx* dctx = (ZBUFFv06_DCtx*) legacyContext; + const void* src = (const char*)input->src + input->pos; + size_t readSize = input->size - input->pos; + void* dst = (char*)output->dst + output->pos; + size_t decodedSize = output->size - output->pos; + size_t const hintSize = ZBUFFv06_decompressContinue(dctx, dst, &decodedSize, src, &readSize); + output->pos += decodedSize; + input->pos += readSize; + return hintSize; + } +#endif +#if (ZSTD_LEGACY_SUPPORT <= 7) + case 7 : + { + ZBUFFv07_DCtx* dctx = (ZBUFFv07_DCtx*) legacyContext; + const void* src = (const char*)input->src + input->pos; + size_t readSize = input->size - input->pos; + void* dst = (char*)output->dst + output->pos; + size_t decodedSize = output->size - output->pos; + size_t const hintSize = ZBUFFv07_decompressContinue(dctx, dst, &decodedSize, src, &readSize); + output->pos += decodedSize; + input->pos += readSize; + return hintSize; + } +#endif + } +} + + +#if defined (__cplusplus) +} +#endif + +#endif /* ZSTD_LEGACY_H */ diff --git a/vendor/github.com/DataDog/zstd/zstd_opt.c b/vendor/github.com/DataDog/zstd/zstd_opt.c new file mode 100644 index 00000000000..f63f0c58522 --- /dev/null +++ b/vendor/github.com/DataDog/zstd/zstd_opt.c @@ -0,0 +1,923 @@ +/* + * Copyright (c) 2016-present, Przemyslaw Skibinski, Yann Collet, Facebook, Inc. + * All rights reserved. 
+ * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + +#include "zstd_compress_internal.h" +#include "zstd_opt.h" + + +#define ZSTD_LITFREQ_ADD 2 /* scaling factor for litFreq, so that frequencies adapt faster to new stats. Also used for matchSum (?) */ +#define ZSTD_FREQ_DIV 4 /* log factor when using previous stats to init next stats */ +#define ZSTD_MAX_PRICE (1<<30) + + +/*-************************************* +* Price functions for optimal parser +***************************************/ +static void ZSTD_setLog2Prices(optState_t* optPtr) +{ + optPtr->log2litSum = ZSTD_highbit32(optPtr->litSum+1); + optPtr->log2litLengthSum = ZSTD_highbit32(optPtr->litLengthSum+1); + optPtr->log2matchLengthSum = ZSTD_highbit32(optPtr->matchLengthSum+1); + optPtr->log2offCodeSum = ZSTD_highbit32(optPtr->offCodeSum+1); +} + + +static void ZSTD_rescaleFreqs(optState_t* const optPtr, + const BYTE* const src, size_t const srcSize) +{ + optPtr->staticPrices = 0; + + if (optPtr->litLengthSum == 0) { /* first init */ + unsigned u; + if (srcSize <= 1024) optPtr->staticPrices = 1; + + assert(optPtr->litFreq!=NULL); + for (u=0; u<=MaxLit; u++) + optPtr->litFreq[u] = 0; + for (u=0; ulitFreq[src[u]]++; + optPtr->litSum = 0; + for (u=0; u<=MaxLit; u++) { + optPtr->litFreq[u] = 1 + (optPtr->litFreq[u] >> ZSTD_FREQ_DIV); + optPtr->litSum += optPtr->litFreq[u]; + } + + for (u=0; u<=MaxLL; u++) + optPtr->litLengthFreq[u] = 1; + optPtr->litLengthSum = MaxLL+1; + for (u=0; u<=MaxML; u++) + optPtr->matchLengthFreq[u] = 1; + optPtr->matchLengthSum = MaxML+1; + for (u=0; u<=MaxOff; u++) + optPtr->offCodeFreq[u] = 1; + optPtr->offCodeSum = (MaxOff+1); + + } else { + unsigned u; + + optPtr->litSum = 0; + for (u=0; u<=MaxLit; u++) { + 
optPtr->litFreq[u] = 1 + (optPtr->litFreq[u] >> (ZSTD_FREQ_DIV+1)); + optPtr->litSum += optPtr->litFreq[u]; + } + optPtr->litLengthSum = 0; + for (u=0; u<=MaxLL; u++) { + optPtr->litLengthFreq[u] = 1 + (optPtr->litLengthFreq[u]>>(ZSTD_FREQ_DIV+1)); + optPtr->litLengthSum += optPtr->litLengthFreq[u]; + } + optPtr->matchLengthSum = 0; + for (u=0; u<=MaxML; u++) { + optPtr->matchLengthFreq[u] = 1 + (optPtr->matchLengthFreq[u]>>ZSTD_FREQ_DIV); + optPtr->matchLengthSum += optPtr->matchLengthFreq[u]; + } + optPtr->offCodeSum = 0; + for (u=0; u<=MaxOff; u++) { + optPtr->offCodeFreq[u] = 1 + (optPtr->offCodeFreq[u]>>ZSTD_FREQ_DIV); + optPtr->offCodeSum += optPtr->offCodeFreq[u]; + } + } + + ZSTD_setLog2Prices(optPtr); +} + + +/* ZSTD_rawLiteralsCost() : + * cost of literals (only) in given segment (which length can be null) + * does not include cost of literalLength symbol */ +static U32 ZSTD_rawLiteralsCost(const BYTE* const literals, U32 const litLength, + const optState_t* const optPtr) +{ + if (optPtr->staticPrices) return (litLength*6); /* 6 bit per literal - no statistic used */ + if (litLength == 0) return 0; + + /* literals */ + { U32 u; + U32 cost = litLength * optPtr->log2litSum; + for (u=0; u < litLength; u++) + cost -= ZSTD_highbit32(optPtr->litFreq[literals[u]]+1); + return cost; + } +} + +/* ZSTD_litLengthPrice() : + * cost of literalLength symbol */ +static U32 ZSTD_litLengthPrice(U32 const litLength, const optState_t* const optPtr) +{ + if (optPtr->staticPrices) return ZSTD_highbit32((U32)litLength+1); + + /* literal Length */ + { U32 const llCode = ZSTD_LLcode(litLength); + U32 const price = LL_bits[llCode] + optPtr->log2litLengthSum - ZSTD_highbit32(optPtr->litLengthFreq[llCode]+1); + return price; + } +} + +/* ZSTD_litLengthPrice() : + * cost of the literal part of a sequence, + * including literals themselves, and literalLength symbol */ +static U32 ZSTD_fullLiteralsCost(const BYTE* const literals, U32 const litLength, + const optState_t* const optPtr) 
+{ + return ZSTD_rawLiteralsCost(literals, litLength, optPtr) + + ZSTD_litLengthPrice(litLength, optPtr); +} + +/* ZSTD_litLengthContribution() : + * @return ( cost(litlength) - cost(0) ) + * this value can then be added to rawLiteralsCost() + * to provide a cost which is directly comparable to a match ending at same position */ +static int ZSTD_litLengthContribution(U32 const litLength, const optState_t* const optPtr) +{ + if (optPtr->staticPrices) return ZSTD_highbit32(litLength+1); + + /* literal Length */ + { U32 const llCode = ZSTD_LLcode(litLength); + int const contribution = LL_bits[llCode] + + ZSTD_highbit32(optPtr->litLengthFreq[0]+1) + - ZSTD_highbit32(optPtr->litLengthFreq[llCode]+1); +#if 1 + return contribution; +#else + return MAX(0, contribution); /* sometimes better, sometimes not ... */ +#endif + } +} + +/* ZSTD_literalsContribution() : + * creates a fake cost for the literals part of a sequence + * which can be compared to the ending cost of a match + * should a new match start at this position */ +static int ZSTD_literalsContribution(const BYTE* const literals, U32 const litLength, + const optState_t* const optPtr) +{ + int const contribution = ZSTD_rawLiteralsCost(literals, litLength, optPtr) + + ZSTD_litLengthContribution(litLength, optPtr); + return contribution; +} + +/* ZSTD_getMatchPrice() : + * Provides the cost of the match part (offset + matchLength) of a sequence + * Must be combined with ZSTD_fullLiteralsCost() to get the full cost of a sequence. 
+ * optLevel: when <2, favors small offset for decompression speed (improved cache efficiency) */ +FORCE_INLINE_TEMPLATE U32 ZSTD_getMatchPrice( + U32 const offset, U32 const matchLength, + const optState_t* const optPtr, + int const optLevel) +{ + U32 price; + U32 const offCode = ZSTD_highbit32(offset+1); + U32 const mlBase = matchLength - MINMATCH; + assert(matchLength >= MINMATCH); + + if (optPtr->staticPrices) /* fixed scheme, do not use statistics */ + return ZSTD_highbit32((U32)mlBase+1) + 16 + offCode; + + price = offCode + optPtr->log2offCodeSum - ZSTD_highbit32(optPtr->offCodeFreq[offCode]+1); + if ((optLevel<2) /*static*/ && offCode >= 20) price += (offCode-19)*2; /* handicap for long distance offsets, favor decompression speed */ + + /* match Length */ + { U32 const mlCode = ZSTD_MLcode(mlBase); + price += ML_bits[mlCode] + optPtr->log2matchLengthSum - ZSTD_highbit32(optPtr->matchLengthFreq[mlCode]+1); + } + + DEBUGLOG(8, "ZSTD_getMatchPrice(ml:%u) = %u", matchLength, price); + return price; +} + +static void ZSTD_updateStats(optState_t* const optPtr, + U32 litLength, const BYTE* literals, + U32 offsetCode, U32 matchLength) +{ + /* literals */ + { U32 u; + for (u=0; u < litLength; u++) + optPtr->litFreq[literals[u]] += ZSTD_LITFREQ_ADD; + optPtr->litSum += litLength*ZSTD_LITFREQ_ADD; + } + + /* literal Length */ + { U32 const llCode = ZSTD_LLcode(litLength); + optPtr->litLengthFreq[llCode]++; + optPtr->litLengthSum++; + } + + /* match offset code (0-2=>repCode; 3+=>offset+2) */ + { U32 const offCode = ZSTD_highbit32(offsetCode+1); + assert(offCode <= MaxOff); + optPtr->offCodeFreq[offCode]++; + optPtr->offCodeSum++; + } + + /* match Length */ + { U32 const mlBase = matchLength - MINMATCH; + U32 const mlCode = ZSTD_MLcode(mlBase); + optPtr->matchLengthFreq[mlCode]++; + optPtr->matchLengthSum++; + } +} + + +/* ZSTD_readMINMATCH() : + * function safe only for comparisons + * assumption : memPtr must be at least 4 bytes before end of buffer */ +MEM_STATIC 
U32 ZSTD_readMINMATCH(const void* memPtr, U32 length) +{ + switch (length) + { + default : + case 4 : return MEM_read32(memPtr); + case 3 : if (MEM_isLittleEndian()) + return MEM_read32(memPtr)<<8; + else + return MEM_read32(memPtr)>>8; + } +} + + +/* Update hashTable3 up to ip (excluded) + Assumption : always within prefix (i.e. not within extDict) */ +static U32 ZSTD_insertAndFindFirstIndexHash3 (ZSTD_matchState_t* ms, const BYTE* const ip) +{ + U32* const hashTable3 = ms->hashTable3; + U32 const hashLog3 = ms->hashLog3; + const BYTE* const base = ms->window.base; + U32 idx = ms->nextToUpdate3; + U32 const target = ms->nextToUpdate3 = (U32)(ip - base); + size_t const hash3 = ZSTD_hash3Ptr(ip, hashLog3); + assert(hashLog3 > 0); + + while(idx < target) { + hashTable3[ZSTD_hash3Ptr(base+idx, hashLog3)] = idx; + idx++; + } + + return hashTable3[hash3]; +} + + +/*-************************************* +* Binary Tree search +***************************************/ +/** ZSTD_insertBt1() : add one or multiple positions to tree. + * ip : assumed <= iend-8 . + * @return : nb of positions added */ +static U32 ZSTD_insertBt1( + ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams, + const BYTE* const ip, const BYTE* const iend, + U32 const mls, U32 const extDict) +{ + U32* const hashTable = ms->hashTable; + U32 const hashLog = cParams->hashLog; + size_t const h = ZSTD_hashPtr(ip, hashLog, mls); + U32* const bt = ms->chainTable; + U32 const btLog = cParams->chainLog - 1; + U32 const btMask = (1 << btLog) - 1; + U32 matchIndex = hashTable[h]; + size_t commonLengthSmaller=0, commonLengthLarger=0; + const BYTE* const base = ms->window.base; + const BYTE* const dictBase = ms->window.dictBase; + const U32 dictLimit = ms->window.dictLimit; + const BYTE* const dictEnd = dictBase + dictLimit; + const BYTE* const prefixStart = base + dictLimit; + const BYTE* match; + const U32 current = (U32)(ip-base); + const U32 btLow = btMask >= current ? 
0 : current - btMask; + U32* smallerPtr = bt + 2*(current&btMask); + U32* largerPtr = smallerPtr + 1; + U32 dummy32; /* to be nullified at the end */ + U32 const windowLow = ms->window.lowLimit; + U32 matchEndIdx = current+8+1; + size_t bestLength = 8; + U32 nbCompares = 1U << cParams->searchLog; +#ifdef ZSTD_C_PREDICT + U32 predictedSmall = *(bt + 2*((current-1)&btMask) + 0); + U32 predictedLarge = *(bt + 2*((current-1)&btMask) + 1); + predictedSmall += (predictedSmall>0); + predictedLarge += (predictedLarge>0); +#endif /* ZSTD_C_PREDICT */ + + DEBUGLOG(8, "ZSTD_insertBt1 (%u)", current); + + assert(ip <= iend-8); /* required for h calculation */ + hashTable[h] = current; /* Update Hash Table */ + + while (nbCompares-- && (matchIndex > windowLow)) { + U32* const nextPtr = bt + 2*(matchIndex & btMask); + size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */ + assert(matchIndex < current); + +#ifdef ZSTD_C_PREDICT /* note : can create issues when hlog small <= 11 */ + const U32* predictPtr = bt + 2*((matchIndex-1) & btMask); /* written this way, as bt is a roll buffer */ + if (matchIndex == predictedSmall) { + /* no need to check length, result known */ + *smallerPtr = matchIndex; + if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop the search */ + smallerPtr = nextPtr+1; /* new "smaller" => larger of match */ + matchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */ + predictedSmall = predictPtr[1] + (predictPtr[1]>0); + continue; + } + if (matchIndex == predictedLarge) { + *largerPtr = matchIndex; + if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop the search */ + largerPtr = nextPtr; + matchIndex = nextPtr[0]; + predictedLarge = predictPtr[0] + (predictPtr[0]>0); + continue; + } +#endif + + if ((!extDict) || (matchIndex+matchLength >= dictLimit)) { + assert(matchIndex+matchLength >= dictLimit); /* might be wrong 
if extDict is incorrectly set to 0 */ + match = base + matchIndex; + matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend); + } else { + match = dictBase + matchIndex; + matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart); + if (matchIndex+matchLength >= dictLimit) + match = base + matchIndex; /* to prepare for next usage of match[matchLength] */ + } + + if (matchLength > bestLength) { + bestLength = matchLength; + if (matchLength > matchEndIdx - matchIndex) + matchEndIdx = matchIndex + (U32)matchLength; + } + + if (ip+matchLength == iend) { /* equal : no way to know if inf or sup */ + break; /* drop , to guarantee consistency ; miss a bit of compression, but other solutions can corrupt tree */ + } + + if (match[matchLength] < ip[matchLength]) { /* necessarily within buffer */ + /* match is smaller than current */ + *smallerPtr = matchIndex; /* update smaller idx */ + commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */ + if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop searching */ + smallerPtr = nextPtr+1; /* new "candidate" => larger than match, which was smaller than target */ + matchIndex = nextPtr[1]; /* new matchIndex, larger than previous and closer to current */ + } else { + /* match is larger than current */ + *largerPtr = matchIndex; + commonLengthLarger = matchLength; + if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop searching */ + largerPtr = nextPtr; + matchIndex = nextPtr[0]; + } } + + *smallerPtr = *largerPtr = 0; + if (bestLength > 384) return MIN(192, (U32)(bestLength - 384)); /* speed optimization */ + assert(matchEndIdx > current + 8); + return matchEndIdx - (current + 8); +} + +FORCE_INLINE_TEMPLATE +void ZSTD_updateTree_internal( + ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams, + const BYTE* const ip, const BYTE* const iend, + const U32 mls, const 
U32 extDict) +{ + const BYTE* const base = ms->window.base; + U32 const target = (U32)(ip - base); + U32 idx = ms->nextToUpdate; + DEBUGLOG(7, "ZSTD_updateTree_internal, from %u to %u (extDict:%u)", + idx, target, extDict); + + while(idx < target) + idx += ZSTD_insertBt1(ms, cParams, base+idx, iend, mls, extDict); + ms->nextToUpdate = target; +} + +void ZSTD_updateTree( + ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams, + const BYTE* ip, const BYTE* iend) +{ + ZSTD_updateTree_internal(ms, cParams, ip, iend, cParams->searchLength, 0 /*extDict*/); +} + +FORCE_INLINE_TEMPLATE +U32 ZSTD_insertBtAndGetAllMatches ( + ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams, + const BYTE* const ip, const BYTE* const iLimit, int const extDict, + U32 rep[ZSTD_REP_NUM], U32 const ll0, + ZSTD_match_t* matches, const U32 lengthToBeat, U32 const mls /* template */) +{ + U32 const sufficient_len = MIN(cParams->targetLength, ZSTD_OPT_NUM -1); + const BYTE* const base = ms->window.base; + U32 const current = (U32)(ip-base); + U32 const hashLog = cParams->hashLog; + U32 const minMatch = (mls==3) ? 3 : 4; + U32* const hashTable = ms->hashTable; + size_t const h = ZSTD_hashPtr(ip, hashLog, mls); + U32 matchIndex = hashTable[h]; + U32* const bt = ms->chainTable; + U32 const btLog = cParams->chainLog - 1; + U32 const btMask= (1U << btLog) - 1; + size_t commonLengthSmaller=0, commonLengthLarger=0; + const BYTE* const dictBase = ms->window.dictBase; + U32 const dictLimit = ms->window.dictLimit; + const BYTE* const dictEnd = dictBase + dictLimit; + const BYTE* const prefixStart = base + dictLimit; + U32 const btLow = btMask >= current ? 
0 : current - btMask; + U32 const windowLow = ms->window.lowLimit; + U32* smallerPtr = bt + 2*(current&btMask); + U32* largerPtr = bt + 2*(current&btMask) + 1; + U32 matchEndIdx = current+8+1; /* farthest referenced position of any match => detects repetitive patterns */ + U32 dummy32; /* to be nullified at the end */ + U32 mnum = 0; + U32 nbCompares = 1U << cParams->searchLog; + + size_t bestLength = lengthToBeat-1; + DEBUGLOG(7, "ZSTD_insertBtAndGetAllMatches"); + + /* check repCode */ + { U32 const lastR = ZSTD_REP_NUM + ll0; + U32 repCode; + for (repCode = ll0; repCode < lastR; repCode++) { + U32 const repOffset = (repCode==ZSTD_REP_NUM) ? (rep[0] - 1) : rep[repCode]; + U32 const repIndex = current - repOffset; + U32 repLen = 0; + assert(current >= dictLimit); + if (repOffset-1 /* intentional overflow, discards 0 and -1 */ < current-dictLimit) { /* equivalent to `current > repIndex >= dictLimit` */ + if (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(ip - repOffset, minMatch)) { + repLen = (U32)ZSTD_count(ip+minMatch, ip+minMatch-repOffset, iLimit) + minMatch; + } + } else { /* repIndex < dictLimit || repIndex >= current */ + const BYTE* const repMatch = dictBase + repIndex; + assert(current >= windowLow); + if ( extDict /* this case only valid in extDict mode */ + && ( ((repOffset-1) /*intentional overflow*/ < current - windowLow) /* equivalent to `current > repIndex >= windowLow` */ + & (((U32)((dictLimit-1) - repIndex) >= 3) ) /* intentional overflow : do not test positions overlapping 2 memory segments */) + && (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch)) ) { + repLen = (U32)ZSTD_count_2segments(ip+minMatch, repMatch+minMatch, iLimit, dictEnd, prefixStart) + minMatch; + } } + /* save longer solution */ + if (repLen > bestLength) { + DEBUGLOG(8, "found rep-match %u of length %u", + repCode - ll0, (U32)repLen); + bestLength = repLen; + matches[mnum].off = repCode - ll0; + matches[mnum].len = (U32)repLen; + mnum++; + if ( 
(repLen > sufficient_len) + | (ip+repLen == iLimit) ) { /* best possible */ + return mnum; + } } } } + + /* HC3 match finder */ + if ((mls == 3) /*static*/ && (bestLength < mls)) { + U32 const matchIndex3 = ZSTD_insertAndFindFirstIndexHash3(ms, ip); + if ((matchIndex3 > windowLow) + & (current - matchIndex3 < (1<<18)) /*heuristic : longer distance likely too expensive*/ ) { + size_t mlen; + if ((!extDict) /*static*/ || (matchIndex3 >= dictLimit)) { + const BYTE* const match = base + matchIndex3; + mlen = ZSTD_count(ip, match, iLimit); + } else { + const BYTE* const match = dictBase + matchIndex3; + mlen = ZSTD_count_2segments(ip, match, iLimit, dictEnd, prefixStart); + } + + /* save best solution */ + if (mlen >= mls /* == 3 > bestLength */) { + DEBUGLOG(8, "found small match with hlog3, of length %u", + (U32)mlen); + bestLength = mlen; + assert(current > matchIndex3); + assert(mnum==0); /* no prior solution */ + matches[0].off = (current - matchIndex3) + ZSTD_REP_MOVE; + matches[0].len = (U32)mlen; + mnum = 1; + if ( (mlen > sufficient_len) | + (ip+mlen == iLimit) ) { /* best possible length */ + ms->nextToUpdate = current+1; /* skip insertion */ + return 1; + } } } } + + hashTable[h] = current; /* Update Hash Table */ + + while (nbCompares-- && (matchIndex > windowLow)) { + U32* const nextPtr = bt + 2*(matchIndex & btMask); + size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */ + const BYTE* match; + assert(current > matchIndex); + + if ((!extDict) || (matchIndex+matchLength >= dictLimit)) { + assert(matchIndex+matchLength >= dictLimit); /* ensure the condition is correct when !extDict */ + match = base + matchIndex; + matchLength += ZSTD_count(ip+matchLength, match+matchLength, iLimit); + } else { + match = dictBase + matchIndex; + matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iLimit, dictEnd, prefixStart); + if (matchIndex+matchLength >= dictLimit) + match = base + matchIndex; /* 
prepare for match[matchLength] */ + } + + if (matchLength > bestLength) { + DEBUGLOG(8, "found match of length %u at distance %u", + (U32)matchLength, current - matchIndex); + assert(matchEndIdx > matchIndex); + if (matchLength > matchEndIdx - matchIndex) + matchEndIdx = matchIndex + (U32)matchLength; + bestLength = matchLength; + matches[mnum].off = (current - matchIndex) + ZSTD_REP_MOVE; + matches[mnum].len = (U32)matchLength; + mnum++; + if (matchLength > ZSTD_OPT_NUM) break; + if (ip+matchLength == iLimit) { /* equal : no way to know if inf or sup */ + break; /* drop, to preserve bt consistency (miss a little bit of compression) */ + } + } + + if (match[matchLength] < ip[matchLength]) { + /* match smaller than current */ + *smallerPtr = matchIndex; /* update smaller idx */ + commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */ + if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop the search */ + smallerPtr = nextPtr+1; /* new candidate => larger than match, which was smaller than current */ + matchIndex = nextPtr[1]; /* new matchIndex, larger than previous, closer to current */ + } else { + *largerPtr = matchIndex; + commonLengthLarger = matchLength; + if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop the search */ + largerPtr = nextPtr; + matchIndex = nextPtr[0]; + } } + + *smallerPtr = *largerPtr = 0; + + assert(matchEndIdx > current+8); + ms->nextToUpdate = matchEndIdx - 8; /* skip repetitive patterns */ + return mnum; +} + + +FORCE_INLINE_TEMPLATE U32 ZSTD_BtGetAllMatches ( + ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams, + const BYTE* ip, const BYTE* const iHighLimit, int const extDict, + U32 rep[ZSTD_REP_NUM], U32 const ll0, + ZSTD_match_t* matches, U32 const lengthToBeat) +{ + U32 const matchLengthSearch = cParams->searchLength; + DEBUGLOG(7, "ZSTD_BtGetAllMatches"); + if (ip < ms->window.base + ms->nextToUpdate) return 
0; /* skipped area */ + ZSTD_updateTree_internal(ms, cParams, ip, iHighLimit, matchLengthSearch, extDict); + switch(matchLengthSearch) + { + case 3 : return ZSTD_insertBtAndGetAllMatches(ms, cParams, ip, iHighLimit, extDict, rep, ll0, matches, lengthToBeat, 3); + default : + case 4 : return ZSTD_insertBtAndGetAllMatches(ms, cParams, ip, iHighLimit, extDict, rep, ll0, matches, lengthToBeat, 4); + case 5 : return ZSTD_insertBtAndGetAllMatches(ms, cParams, ip, iHighLimit, extDict, rep, ll0, matches, lengthToBeat, 5); + case 7 : + case 6 : return ZSTD_insertBtAndGetAllMatches(ms, cParams, ip, iHighLimit, extDict, rep, ll0, matches, lengthToBeat, 6); + } +} + + +/*-******************************* +* Optimal parser +*********************************/ +typedef struct repcodes_s { + U32 rep[3]; +} repcodes_t; + +repcodes_t ZSTD_updateRep(U32 const rep[3], U32 const offset, U32 const ll0) +{ + repcodes_t newReps; + if (offset >= ZSTD_REP_NUM) { /* full offset */ + newReps.rep[2] = rep[1]; + newReps.rep[1] = rep[0]; + newReps.rep[0] = offset - ZSTD_REP_MOVE; + } else { /* repcode */ + U32 const repCode = offset + ll0; + if (repCode > 0) { /* note : if repCode==0, no change */ + U32 const currentOffset = (repCode==ZSTD_REP_NUM) ? (rep[0] - 1) : rep[repCode]; + newReps.rep[2] = (repCode >= 2) ? 
rep[1] : rep[2]; + newReps.rep[1] = rep[0]; + newReps.rep[0] = currentOffset; + } else { /* repCode == 0 */ + memcpy(&newReps, rep, sizeof(newReps)); + } + } + return newReps; +} + + +typedef struct { + const BYTE* anchor; + U32 litlen; + U32 rawLitCost; +} cachedLiteralPrice_t; + +static U32 ZSTD_rawLiteralsCost_cached( + cachedLiteralPrice_t* const cachedLitPrice, + const BYTE* const anchor, U32 const litlen, + const optState_t* const optStatePtr) +{ + U32 startCost; + U32 remainingLength; + const BYTE* startPosition; + + if (anchor == cachedLitPrice->anchor) { + startCost = cachedLitPrice->rawLitCost; + startPosition = anchor + cachedLitPrice->litlen; + assert(litlen >= cachedLitPrice->litlen); + remainingLength = litlen - cachedLitPrice->litlen; + } else { + startCost = 0; + startPosition = anchor; + remainingLength = litlen; + } + + { U32 const rawLitCost = startCost + ZSTD_rawLiteralsCost(startPosition, remainingLength, optStatePtr); + cachedLitPrice->anchor = anchor; + cachedLitPrice->litlen = litlen; + cachedLitPrice->rawLitCost = rawLitCost; + return rawLitCost; + } +} + +static U32 ZSTD_fullLiteralsCost_cached( + cachedLiteralPrice_t* const cachedLitPrice, + const BYTE* const anchor, U32 const litlen, + const optState_t* const optStatePtr) +{ + return ZSTD_rawLiteralsCost_cached(cachedLitPrice, anchor, litlen, optStatePtr) + + ZSTD_litLengthPrice(litlen, optStatePtr); +} + +static int ZSTD_literalsContribution_cached( + cachedLiteralPrice_t* const cachedLitPrice, + const BYTE* const anchor, U32 const litlen, + const optState_t* const optStatePtr) +{ + int const contribution = ZSTD_rawLiteralsCost_cached(cachedLitPrice, anchor, litlen, optStatePtr) + + ZSTD_litLengthContribution(litlen, optStatePtr); + return contribution; +} + +FORCE_INLINE_TEMPLATE +size_t ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,seqStore_t* seqStore, + U32 rep[ZSTD_REP_NUM], + ZSTD_compressionParameters const* cParams, + const void* src, size_t srcSize, + const int optLevel, 
const int extDict) +{ + optState_t* const optStatePtr = &ms->opt; + const BYTE* const istart = (const BYTE*)src; + const BYTE* ip = istart; + const BYTE* anchor = istart; + const BYTE* const iend = istart + srcSize; + const BYTE* const ilimit = iend - 8; + const BYTE* const base = ms->window.base; + const BYTE* const prefixStart = base + ms->window.dictLimit; + + U32 const sufficient_len = MIN(cParams->targetLength, ZSTD_OPT_NUM -1); + U32 const minMatch = (cParams->searchLength == 3) ? 3 : 4; + + ZSTD_optimal_t* const opt = optStatePtr->priceTable; + ZSTD_match_t* const matches = optStatePtr->matchTable; + cachedLiteralPrice_t cachedLitPrice; + + /* init */ + DEBUGLOG(5, "ZSTD_compressBlock_opt_generic"); + ms->nextToUpdate3 = ms->nextToUpdate; + ZSTD_rescaleFreqs(optStatePtr, (const BYTE*)src, srcSize); + ip += (ip==prefixStart); + memset(&cachedLitPrice, 0, sizeof(cachedLitPrice)); + + /* Match Loop */ + while (ip < ilimit) { + U32 cur, last_pos = 0; + U32 best_mlen, best_off; + + /* find first match */ + { U32 const litlen = (U32)(ip - anchor); + U32 const ll0 = !litlen; + U32 const nbMatches = ZSTD_BtGetAllMatches(ms, cParams, ip, iend, extDict, rep, ll0, matches, minMatch); + if (!nbMatches) { ip++; continue; } + + /* initialize opt[0] */ + { U32 i ; for (i=0; i immediate encoding */ + { U32 const maxML = matches[nbMatches-1].len; + DEBUGLOG(7, "found %u matches of maxLength=%u and offset=%u at cPos=%u => start new serie", + nbMatches, maxML, matches[nbMatches-1].off, (U32)(ip-prefixStart)); + + if (maxML > sufficient_len) { + best_mlen = maxML; + best_off = matches[nbMatches-1].off; + DEBUGLOG(7, "large match (%u>%u), immediate encoding", + best_mlen, sufficient_len); + cur = 0; + last_pos = 1; + goto _shortestPath; + } } + + /* set prices for first matches starting position == 0 */ + { U32 const literalsPrice = ZSTD_fullLiteralsCost_cached(&cachedLitPrice, anchor, litlen, optStatePtr); + U32 pos; + U32 matchNb; + for (pos = 0; pos < minMatch; pos++) { + 
opt[pos].mlen = 1; + opt[pos].price = ZSTD_MAX_PRICE; + } + for (matchNb = 0; matchNb < nbMatches; matchNb++) { + U32 const offset = matches[matchNb].off; + U32 const end = matches[matchNb].len; + repcodes_t const repHistory = ZSTD_updateRep(rep, offset, ll0); + for ( ; pos <= end ; pos++ ) { + U32 const matchPrice = literalsPrice + ZSTD_getMatchPrice(offset, pos, optStatePtr, optLevel); + DEBUGLOG(7, "rPos:%u => set initial price : %u", + pos, matchPrice); + opt[pos].mlen = pos; + opt[pos].off = offset; + opt[pos].litlen = litlen; + opt[pos].price = matchPrice; + memcpy(opt[pos].rep, &repHistory, sizeof(repHistory)); + } } + last_pos = pos-1; + } + } + + /* check further positions */ + for (cur = 1; cur <= last_pos; cur++) { + const BYTE* const inr = ip + cur; + assert(cur < ZSTD_OPT_NUM); + + /* Fix current position with one literal if cheaper */ + { U32 const litlen = (opt[cur-1].mlen == 1) ? opt[cur-1].litlen + 1 : 1; + int price; /* note : contribution can be negative */ + if (cur > litlen) { + price = opt[cur - litlen].price + ZSTD_literalsContribution(inr-litlen, litlen, optStatePtr); + } else { + price = ZSTD_literalsContribution_cached(&cachedLitPrice, anchor, litlen, optStatePtr); + } + assert(price < 1000000000); /* overflow check */ + if (price <= opt[cur].price) { + DEBUGLOG(7, "rPos:%u : better price (%u<%u) using literal", + cur, price, opt[cur].price); + opt[cur].mlen = 1; + opt[cur].off = 0; + opt[cur].litlen = litlen; + opt[cur].price = price; + memcpy(opt[cur].rep, opt[cur-1].rep, sizeof(opt[cur].rep)); + } } + + /* last match must start at a minimum distance of 8 from oend */ + if (inr > ilimit) continue; + + if (cur == last_pos) break; + + if ( (optLevel==0) /*static*/ + && (opt[cur+1].price <= opt[cur].price) ) + continue; /* skip unpromising positions; about ~+6% speed, -0.01 ratio */ + + { U32 const ll0 = (opt[cur].mlen != 1); + U32 const litlen = (opt[cur].mlen == 1) ? opt[cur].litlen : 0; + U32 const previousPrice = (cur > litlen) ? 
opt[cur-litlen].price : 0; + U32 const basePrice = previousPrice + ZSTD_fullLiteralsCost(inr-litlen, litlen, optStatePtr); + U32 const nbMatches = ZSTD_BtGetAllMatches(ms, cParams, inr, iend, extDict, opt[cur].rep, ll0, matches, minMatch); + U32 matchNb; + if (!nbMatches) continue; + + { U32 const maxML = matches[nbMatches-1].len; + DEBUGLOG(7, "rPos:%u, found %u matches, of maxLength=%u", + cur, nbMatches, maxML); + + if ( (maxML > sufficient_len) + | (cur + maxML >= ZSTD_OPT_NUM) ) { + best_mlen = maxML; + best_off = matches[nbMatches-1].off; + last_pos = cur + 1; + goto _shortestPath; + } + } + + /* set prices using matches found at position == cur */ + for (matchNb = 0; matchNb < nbMatches; matchNb++) { + U32 const offset = matches[matchNb].off; + repcodes_t const repHistory = ZSTD_updateRep(opt[cur].rep, offset, ll0); + U32 const lastML = matches[matchNb].len; + U32 const startML = (matchNb>0) ? matches[matchNb-1].len+1 : minMatch; + U32 mlen; + + DEBUGLOG(7, "testing match %u => offCode=%u, mlen=%u, llen=%u", + matchNb, matches[matchNb].off, lastML, litlen); + + for (mlen = lastML; mlen >= startML; mlen--) { + U32 const pos = cur + mlen; + int const price = basePrice + ZSTD_getMatchPrice(offset, mlen, optStatePtr, optLevel); + + if ((pos > last_pos) || (price < opt[pos].price)) { + DEBUGLOG(7, "rPos:%u => new better price (%u<%u)", + pos, price, opt[pos].price); + while (last_pos < pos) { opt[last_pos+1].price = ZSTD_MAX_PRICE; last_pos++; } + opt[pos].mlen = mlen; + opt[pos].off = offset; + opt[pos].litlen = litlen; + opt[pos].price = price; + memcpy(opt[pos].rep, &repHistory, sizeof(repHistory)); + } else { + if (optLevel==0) break; /* gets ~+10% speed for about -0.01 ratio loss */ + } + } } } + } /* for (cur = 1; cur <= last_pos; cur++) */ + + best_mlen = opt[last_pos].mlen; + best_off = opt[last_pos].off; + cur = last_pos - best_mlen; + +_shortestPath: /* cur, last_pos, best_mlen, best_off have to be set */ + assert(opt[0].mlen == 1); + + /* reverse 
traversal */ + DEBUGLOG(7, "start reverse traversal (last_pos:%u, cur:%u)", + last_pos, cur); + { U32 selectedMatchLength = best_mlen; + U32 selectedOffset = best_off; + U32 pos = cur; + while (1) { + U32 const mlen = opt[pos].mlen; + U32 const off = opt[pos].off; + opt[pos].mlen = selectedMatchLength; + opt[pos].off = selectedOffset; + selectedMatchLength = mlen; + selectedOffset = off; + if (mlen > pos) break; + pos -= mlen; + } } + + /* save sequences */ + { U32 pos; + for (pos=0; pos < last_pos; ) { + U32 const llen = (U32)(ip - anchor); + U32 const mlen = opt[pos].mlen; + U32 const offset = opt[pos].off; + if (mlen == 1) { ip++; pos++; continue; } /* literal position => move on */ + pos += mlen; ip += mlen; + + /* repcodes update : like ZSTD_updateRep(), but update in place */ + if (offset >= ZSTD_REP_NUM) { /* full offset */ + rep[2] = rep[1]; + rep[1] = rep[0]; + rep[0] = offset - ZSTD_REP_MOVE; + } else { /* repcode */ + U32 const repCode = offset + (llen==0); + if (repCode) { /* note : if repCode==0, no change */ + U32 const currentOffset = (repCode==ZSTD_REP_NUM) ? 
(rep[0] - 1) : rep[repCode]; + if (repCode >= 2) rep[2] = rep[1]; + rep[1] = rep[0]; + rep[0] = currentOffset; + } + } + + ZSTD_updateStats(optStatePtr, llen, anchor, offset, mlen); + ZSTD_storeSeq(seqStore, llen, anchor, offset, mlen-MINMATCH); + anchor = ip; + } } + ZSTD_setLog2Prices(optStatePtr); + } /* while (ip < ilimit) */ + + /* Return the last literals size */ + return iend - anchor; +} + + +size_t ZSTD_compressBlock_btopt( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize) +{ + DEBUGLOG(5, "ZSTD_compressBlock_btopt"); + return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, cParams, src, srcSize, 0 /*optLevel*/, 0 /*extDict*/); +} + +size_t ZSTD_compressBlock_btultra( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize) +{ + return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, cParams, src, srcSize, 2 /*optLevel*/, 0 /*extDict*/); +} + +size_t ZSTD_compressBlock_btopt_extDict( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize) +{ + return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, cParams, src, srcSize, 0 /*optLevel*/, 1 /*extDict*/); +} + +size_t ZSTD_compressBlock_btultra_extDict( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize) +{ + return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, cParams, src, srcSize, 2 /*optLevel*/, 1 /*extDict*/); +} diff --git a/vendor/github.com/DataDog/zstd/zstd_opt.h b/vendor/github.com/DataDog/zstd/zstd_opt.h new file mode 100644 index 00000000000..b8dc389f317 --- /dev/null +++ b/vendor/github.com/DataDog/zstd/zstd_opt.h @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. 
+ * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + +#ifndef ZSTD_OPT_H +#define ZSTD_OPT_H + +#if defined (__cplusplus) +extern "C" { +#endif + +#include "zstd_compress_internal.h" + +void ZSTD_updateTree( + ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams, + const BYTE* ip, const BYTE* iend); /* used in ZSTD_loadDictionaryContent() */ + +size_t ZSTD_compressBlock_btopt( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize); +size_t ZSTD_compressBlock_btultra( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize); + +size_t ZSTD_compressBlock_btopt_extDict( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize); +size_t ZSTD_compressBlock_btultra_extDict( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize); + +#if defined (__cplusplus) +} +#endif + +#endif /* ZSTD_OPT_H */ diff --git a/vendor/github.com/DataDog/zstd/zstd_stream.go b/vendor/github.com/DataDog/zstd/zstd_stream.go new file mode 100644 index 00000000000..d5d1336deb6 --- /dev/null +++ b/vendor/github.com/DataDog/zstd/zstd_stream.go @@ -0,0 +1,291 @@ +package zstd + +/* +#define ZSTD_STATIC_LINKING_ONLY +#define ZBUFF_DISABLE_DEPRECATE_WARNINGS +#include "zstd.h" +#include "zbuff.h" +*/ +import "C" +import ( + "errors" + "fmt" + "io" + "unsafe" +) + +var errShortRead = errors.New("short read") + +// Writer is an 
io.WriteCloser that zstd-compresses its input. +type Writer struct { + CompressionLevel int + + ctx *C.ZSTD_CCtx + dict []byte + dstBuffer []byte + firstError error + underlyingWriter io.Writer +} + +func resize(in []byte, newSize int) []byte { + if in == nil { + return make([]byte, newSize) + } + if newSize <= cap(in) { + return in[:newSize] + } + toAdd := newSize - len(in) + return append(in, make([]byte, toAdd)...) +} + +// NewWriter creates a new Writer with default compression options. Writes to +// the writer will be written in compressed form to w. +func NewWriter(w io.Writer) *Writer { + return NewWriterLevelDict(w, DefaultCompression, nil) +} + +// NewWriterLevel is like NewWriter but specifies the compression level instead +// of assuming default compression. +// +// The level can be DefaultCompression or any integer value between BestSpeed +// and BestCompression inclusive. +func NewWriterLevel(w io.Writer, level int) *Writer { + return NewWriterLevelDict(w, level, nil) + +} + +// NewWriterLevelDict is like NewWriterLevel but specifies a dictionary to +// compress with. If the dictionary is empty or nil it is ignored. The dictionary +// should not be modified until the writer is closed. +func NewWriterLevelDict(w io.Writer, level int, dict []byte) *Writer { + var err error + ctx := C.ZSTD_createCCtx() + + if dict == nil { + err = getError(int(C.ZSTD_compressBegin(ctx, + C.int(level)))) + } else { + err = getError(int(C.ZSTD_compressBegin_usingDict( + ctx, + unsafe.Pointer(&dict[0]), + C.size_t(len(dict)), + C.int(level)))) + } + + return &Writer{ + CompressionLevel: level, + ctx: ctx, + dict: dict, + dstBuffer: make([]byte, CompressBound(1024)), + firstError: err, + underlyingWriter: w, + } +} + +// Write writes a compressed form of p to the underlying io.Writer. 
+func (w *Writer) Write(p []byte) (int, error) { + if w.firstError != nil { + return 0, w.firstError + } + if len(p) == 0 { + return 0, nil + } + // Check if dstBuffer is enough + if len(w.dstBuffer) < CompressBound(len(p)) { + w.dstBuffer = make([]byte, CompressBound(len(p))) + } + + retCode := C.ZSTD_compressContinue( + w.ctx, + unsafe.Pointer(&w.dstBuffer[0]), + C.size_t(len(w.dstBuffer)), + unsafe.Pointer(&p[0]), + C.size_t(len(p))) + + if err := getError(int(retCode)); err != nil { + return 0, err + } + written := int(retCode) + + // Write to underlying buffer + _, err := w.underlyingWriter.Write(w.dstBuffer[:written]) + + // Same behaviour as zlib, we can't know how much data we wrote, only + // if there was an error + if err != nil { + return 0, err + } + return len(p), err +} + +// Close closes the Writer, flushing any unwritten data to the underlying +// io.Writer and freeing objects, but does not close the underlying io.Writer. +func (w *Writer) Close() error { + retCode := C.ZSTD_compressEnd( + w.ctx, + unsafe.Pointer(&w.dstBuffer[0]), + C.size_t(len(w.dstBuffer)), + unsafe.Pointer(nil), + C.size_t(0)) + + if err := getError(int(retCode)); err != nil { + return err + } + written := int(retCode) + retCode = C.ZSTD_freeCCtx(w.ctx) // Safely close buffer before writing the end + + if err := getError(int(retCode)); err != nil { + return err + } + + _, err := w.underlyingWriter.Write(w.dstBuffer[:written]) + if err != nil { + return err + } + return nil +} + +// reader is an io.ReadCloser that decompresses when read from. +type reader struct { + ctx *C.ZBUFF_DCtx + compressionBuffer []byte + compressionLeft int + decompressionBuffer []byte + decompOff int + decompSize int + dict []byte + firstError error + recommendedSrcSize int + underlyingReader io.Reader +} + +// NewReader creates a new io.ReadCloser. Reads from the returned ReadCloser +// read and decompress data from r. It is the caller's responsibility to call +// Close on the ReadCloser when done. 
If this is not done, underlying objects +// in the zstd library will not be freed. +func NewReader(r io.Reader) io.ReadCloser { + return NewReaderDict(r, nil) +} + +// NewReaderDict is like NewReader but uses a preset dictionary. NewReaderDict +// ignores the dictionary if it is nil. +func NewReaderDict(r io.Reader, dict []byte) io.ReadCloser { + var err error + ctx := C.ZBUFF_createDCtx() + if len(dict) == 0 { + err = getError(int(C.ZBUFF_decompressInit(ctx))) + } else { + err = getError(int(C.ZBUFF_decompressInitDictionary( + ctx, + unsafe.Pointer(&dict[0]), + C.size_t(len(dict))))) + } + cSize := int(C.ZBUFF_recommendedDInSize()) + dSize := int(C.ZBUFF_recommendedDOutSize()) + if cSize <= 0 { + panic(fmt.Errorf("ZBUFF_recommendedDInSize() returned invalid size: %v", cSize)) + } + if dSize <= 0 { + panic(fmt.Errorf("ZBUFF_recommendedDOutSize() returned invalid size: %v", dSize)) + } + + compressionBuffer := make([]byte, cSize) + decompressionBuffer := make([]byte, dSize) + return &reader{ + ctx: ctx, + dict: dict, + compressionBuffer: compressionBuffer, + decompressionBuffer: decompressionBuffer, + firstError: err, + recommendedSrcSize: cSize, + underlyingReader: r, + } +} + +// Close frees the allocated C objects +func (r *reader) Close() error { + return getError(int(C.ZBUFF_freeDCtx(r.ctx))) +} + +func (r *reader) Read(p []byte) (int, error) { + + // If we already have enough bytes, return + if r.decompSize-r.decompOff >= len(p) { + copy(p, r.decompressionBuffer[r.decompOff:]) + r.decompOff += len(p) + return len(p), nil + } + + copy(p, r.decompressionBuffer[r.decompOff:r.decompSize]) + got := r.decompSize - r.decompOff + r.decompSize = 0 + r.decompOff = 0 + + for got < len(p) { + // Populate src + src := r.compressionBuffer + reader := r.underlyingReader + n, err := TryReadFull(reader, src[r.compressionLeft:]) + if err != nil && err != errShortRead { // Handle underlying reader errors first + return 0, fmt.Errorf("failed to read from underlying reader: %s", 
err) + } else if n == 0 && r.compressionLeft == 0 { + return got, io.EOF + } + src = src[:r.compressionLeft+n] + + // C code + cSrcSize := C.size_t(len(src)) + cDstSize := C.size_t(len(r.decompressionBuffer)) + retCode := int(C.ZBUFF_decompressContinue( + r.ctx, + unsafe.Pointer(&r.decompressionBuffer[0]), + &cDstSize, + unsafe.Pointer(&src[0]), + &cSrcSize)) + + if err = getError(retCode); err != nil { + return 0, fmt.Errorf("failed to decompress: %s", err) + } + + // Put everything in buffer + if int(cSrcSize) < len(src) { + left := src[int(cSrcSize):] + copy(r.compressionBuffer, left) + } + r.compressionLeft = len(src) - int(cSrcSize) + r.decompSize = int(cDstSize) + r.decompOff = copy(p[got:], r.decompressionBuffer[:r.decompSize]) + got += r.decompOff + + // Resize buffers + nsize := retCode // Hint for next src buffer size + if nsize <= 0 { + // Reset to recommended size + nsize = r.recommendedSrcSize + } + if nsize < r.compressionLeft { + nsize = r.compressionLeft + } + r.compressionBuffer = resize(r.compressionBuffer, nsize) + } + return got, nil +} + +// TryReadFull reads buffer just as ReadFull does +// Here we expect that buffer may end and we do not return ErrUnexpectedEOF as ReadAtLeast does. +// We return errShortRead instead to distinguish short reads and failures. +// We cannot use ReadFull/ReadAtLeast because it masks Reader errors, such as network failures +// and causes panic instead of error. 
+func TryReadFull(r io.Reader, buf []byte) (n int, err error) { + for n < len(buf) && err == nil { + var nn int + nn, err = r.Read(buf[n:]) + n += nn + } + if n == len(buf) && err == io.EOF { + err = nil // EOF at the end is somewhat expected + } else if err == io.EOF { + err = errShortRead + } + return +} diff --git a/vendor/github.com/DataDog/zstd/zstd_v01.c b/vendor/github.com/DataDog/zstd/zstd_v01.c new file mode 100644 index 00000000000..ae1cb2ce5aa --- /dev/null +++ b/vendor/github.com/DataDog/zstd/zstd_v01.c @@ -0,0 +1,2127 @@ +/* + * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + + +/****************************************** +* Includes +******************************************/ +#include /* size_t, ptrdiff_t */ +#include "zstd_v01.h" +#include "error_private.h" + + +/****************************************** +* Static allocation +******************************************/ +/* You can statically allocate FSE CTable/DTable as a table of unsigned using below macro */ +#define FSE_DTABLE_SIZE_U32(maxTableLog) (1 + (1<2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) +* Increasing memory usage improves compression ratio +* Reduced memory usage can improve speed, due to cache effect +* Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ +#define FSE_MAX_MEMORY_USAGE 14 +#define FSE_DEFAULT_MEMORY_USAGE 13 + +/* FSE_MAX_SYMBOL_VALUE : +* Maximum symbol value authorized. 
+* Required for proper stack allocation */ +#define FSE_MAX_SYMBOL_VALUE 255 + + +/**************************************************************** +* template functions type & suffix +****************************************************************/ +#define FSE_FUNCTION_TYPE BYTE +#define FSE_FUNCTION_EXTENSION + + +/**************************************************************** +* Byte symbol type +****************************************************************/ +typedef struct +{ + unsigned short newState; + unsigned char symbol; + unsigned char nbBits; +} FSE_decode_t; /* size == U32 */ + + + +/**************************************************************** +* Compiler specifics +****************************************************************/ +#ifdef _MSC_VER /* Visual Studio */ +# define FORCE_INLINE static __forceinline +# include /* For Visual 2005 */ +# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ +# pragma warning(disable : 4214) /* disable: C4214: non-int bitfields */ +#else +# define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__) +# if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */ +# ifdef __GNUC__ +# define FORCE_INLINE static inline __attribute__((always_inline)) +# else +# define FORCE_INLINE static inline +# endif +# else +# define FORCE_INLINE static +# endif /* __STDC_VERSION__ */ +#endif + + +/**************************************************************** +* Includes +****************************************************************/ +#include /* malloc, free, qsort */ +#include /* memcpy, memset */ +#include /* printf (debug) */ + + +#ifndef MEM_ACCESS_MODULE +#define MEM_ACCESS_MODULE +/**************************************************************** +* Basic Types +*****************************************************************/ +#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */ +# include +typedef uint8_t BYTE; +typedef 
uint16_t U16; +typedef int16_t S16; +typedef uint32_t U32; +typedef int32_t S32; +typedef uint64_t U64; +typedef int64_t S64; +#else +typedef unsigned char BYTE; +typedef unsigned short U16; +typedef signed short S16; +typedef unsigned int U32; +typedef signed int S32; +typedef unsigned long long U64; +typedef signed long long S64; +#endif + +#endif /* MEM_ACCESS_MODULE */ + +/**************************************************************** +* Memory I/O +*****************************************************************/ +/* FSE_FORCE_MEMORY_ACCESS + * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable. + * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal. + * The below switch allow to select different access method for improved performance. + * Method 0 (default) : use `memcpy()`. Safe and portable. + * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable). + * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`. + * Method 2 : direct access. This method is portable but violate C standard. + * It can generate buggy code on targets generating assembly depending on alignment. + * But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6) + * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details. 
+ * Prefer these methods in priority order (0 > 1 > 2) + */ +#ifndef FSE_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */ +# if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) ) +# define FSE_FORCE_MEMORY_ACCESS 2 +# elif (defined(__INTEL_COMPILER) && !defined(WIN32)) || \ + (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) )) +# define FSE_FORCE_MEMORY_ACCESS 1 +# endif +#endif + + +static unsigned FSE_32bits(void) +{ + return sizeof(void*)==4; +} + +static unsigned FSE_isLittleEndian(void) +{ + const union { U32 i; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */ + return one.c[0]; +} + +#if defined(FSE_FORCE_MEMORY_ACCESS) && (FSE_FORCE_MEMORY_ACCESS==2) + +static U16 FSE_read16(const void* memPtr) { return *(const U16*) memPtr; } +static U32 FSE_read32(const void* memPtr) { return *(const U32*) memPtr; } +static U64 FSE_read64(const void* memPtr) { return *(const U64*) memPtr; } + +#elif defined(FSE_FORCE_MEMORY_ACCESS) && (FSE_FORCE_MEMORY_ACCESS==1) + +/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ +/* currently only defined for gcc and icc */ +typedef union { U16 u16; U32 u32; U64 u64; } __attribute__((packed)) unalign; + +static U16 FSE_read16(const void* ptr) { return ((const unalign*)ptr)->u16; } +static U32 FSE_read32(const void* ptr) { return ((const unalign*)ptr)->u32; } +static U64 FSE_read64(const void* ptr) { return ((const unalign*)ptr)->u64; } + +#else + +static U16 FSE_read16(const void* memPtr) +{ + U16 val; memcpy(&val, memPtr, sizeof(val)); return val; +} + +static U32 FSE_read32(const void* memPtr) +{ + U32 val; memcpy(&val, memPtr, sizeof(val)); return val; +} + +static U64 
FSE_read64(const void* memPtr) +{ + U64 val; memcpy(&val, memPtr, sizeof(val)); return val; +} + +#endif // FSE_FORCE_MEMORY_ACCESS + +static U16 FSE_readLE16(const void* memPtr) +{ + if (FSE_isLittleEndian()) + return FSE_read16(memPtr); + else + { + const BYTE* p = (const BYTE*)memPtr; + return (U16)(p[0] + (p[1]<<8)); + } +} + +static U32 FSE_readLE32(const void* memPtr) +{ + if (FSE_isLittleEndian()) + return FSE_read32(memPtr); + else + { + const BYTE* p = (const BYTE*)memPtr; + return (U32)((U32)p[0] + ((U32)p[1]<<8) + ((U32)p[2]<<16) + ((U32)p[3]<<24)); + } +} + + +static U64 FSE_readLE64(const void* memPtr) +{ + if (FSE_isLittleEndian()) + return FSE_read64(memPtr); + else + { + const BYTE* p = (const BYTE*)memPtr; + return (U64)((U64)p[0] + ((U64)p[1]<<8) + ((U64)p[2]<<16) + ((U64)p[3]<<24) + + ((U64)p[4]<<32) + ((U64)p[5]<<40) + ((U64)p[6]<<48) + ((U64)p[7]<<56)); + } +} + +static size_t FSE_readLEST(const void* memPtr) +{ + if (FSE_32bits()) + return (size_t)FSE_readLE32(memPtr); + else + return (size_t)FSE_readLE64(memPtr); +} + + + +/**************************************************************** +* Constants +*****************************************************************/ +#define FSE_MAX_TABLELOG (FSE_MAX_MEMORY_USAGE-2) +#define FSE_MAX_TABLESIZE (1U< FSE_TABLELOG_ABSOLUTE_MAX +#error "FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX is not supported" +#endif + + +/**************************************************************** +* Error Management +****************************************************************/ +#define FSE_STATIC_ASSERT(c) { enum { FSE_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */ + + +/**************************************************************** +* Complex types +****************************************************************/ +typedef struct +{ + int deltaFindState; + U32 deltaNbBits; +} FSE_symbolCompressionTransform; /* total 8 bytes */ + +typedef U32 
DTable_max_t[FSE_DTABLE_SIZE_U32(FSE_MAX_TABLELOG)]; + +/**************************************************************** +* Internal functions +****************************************************************/ +FORCE_INLINE unsigned FSE_highbit32 (U32 val) +{ +# if defined(_MSC_VER) /* Visual */ + unsigned long r; + _BitScanReverse ( &r, val ); + return (unsigned) r; +# elif defined(__GNUC__) && (GCC_VERSION >= 304) /* GCC Intrinsic */ + return 31 - __builtin_clz (val); +# else /* Software version */ + static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 }; + U32 v = val; + unsigned r; + v |= v >> 1; + v |= v >> 2; + v |= v >> 4; + v |= v >> 8; + v |= v >> 16; + r = DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27]; + return r; +# endif +} + + +/**************************************************************** +* Templates +****************************************************************/ +/* + designed to be included + for type-specific functions (template emulation in C) + Objective is to write these functions only once, for improved maintenance +*/ + +/* safety checks */ +#ifndef FSE_FUNCTION_EXTENSION +# error "FSE_FUNCTION_EXTENSION must be defined" +#endif +#ifndef FSE_FUNCTION_TYPE +# error "FSE_FUNCTION_TYPE must be defined" +#endif + +/* Function names */ +#define FSE_CAT(X,Y) X##Y +#define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y) +#define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y) + + + +static U32 FSE_tableStep(U32 tableSize) { return (tableSize>>1) + (tableSize>>3) + 3; } + +#define FSE_DECODE_TYPE FSE_decode_t + + +typedef struct { + U16 tableLog; + U16 fastMode; +} FSE_DTableHeader; /* sizeof U32 */ + +static size_t FSE_buildDTable +(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog) +{ + void* ptr = dt; + FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr; + FSE_DECODE_TYPE* const tableDecode = (FSE_DECODE_TYPE*)(ptr) + 
1; /* because dt is unsigned, 32-bits aligned on 32-bits */ + const U32 tableSize = 1 << tableLog; + const U32 tableMask = tableSize-1; + const U32 step = FSE_tableStep(tableSize); + U16 symbolNext[FSE_MAX_SYMBOL_VALUE+1]; + U32 position = 0; + U32 highThreshold = tableSize-1; + const S16 largeLimit= (S16)(1 << (tableLog-1)); + U32 noLarge = 1; + U32 s; + + /* Sanity Checks */ + if (maxSymbolValue > FSE_MAX_SYMBOL_VALUE) return (size_t)-FSE_ERROR_maxSymbolValue_tooLarge; + if (tableLog > FSE_MAX_TABLELOG) return (size_t)-FSE_ERROR_tableLog_tooLarge; + + /* Init, lay down lowprob symbols */ + DTableH[0].tableLog = (U16)tableLog; + for (s=0; s<=maxSymbolValue; s++) + { + if (normalizedCounter[s]==-1) + { + tableDecode[highThreshold--].symbol = (FSE_FUNCTION_TYPE)s; + symbolNext[s] = 1; + } + else + { + if (normalizedCounter[s] >= largeLimit) noLarge=0; + symbolNext[s] = normalizedCounter[s]; + } + } + + /* Spread symbols */ + for (s=0; s<=maxSymbolValue; s++) + { + int i; + for (i=0; i highThreshold) position = (position + step) & tableMask; /* lowprob area */ + } + } + + if (position!=0) return (size_t)-FSE_ERROR_GENERIC; /* position must reach all cells once, otherwise normalizedCounter is incorrect */ + + /* Build Decoding table */ + { + U32 i; + for (i=0; ifastMode = (U16)noLarge; + return 0; +} + + +/****************************************** +* FSE byte symbol +******************************************/ +#ifndef FSE_COMMONDEFS_ONLY + +static unsigned FSE_isError(size_t code) { return (code > (size_t)(-FSE_ERROR_maxCode)); } + +static short FSE_abs(short a) +{ + return a<0? 
-a : a; +} + + +/**************************************************************** +* Header bitstream management +****************************************************************/ +static size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr, + const void* headerBuffer, size_t hbSize) +{ + const BYTE* const istart = (const BYTE*) headerBuffer; + const BYTE* const iend = istart + hbSize; + const BYTE* ip = istart; + int nbBits; + int remaining; + int threshold; + U32 bitStream; + int bitCount; + unsigned charnum = 0; + int previous0 = 0; + + if (hbSize < 4) return (size_t)-FSE_ERROR_srcSize_wrong; + bitStream = FSE_readLE32(ip); + nbBits = (bitStream & 0xF) + FSE_MIN_TABLELOG; /* extract tableLog */ + if (nbBits > FSE_TABLELOG_ABSOLUTE_MAX) return (size_t)-FSE_ERROR_tableLog_tooLarge; + bitStream >>= 4; + bitCount = 4; + *tableLogPtr = nbBits; + remaining = (1<1) && (charnum<=*maxSVPtr)) + { + if (previous0) + { + unsigned n0 = charnum; + while ((bitStream & 0xFFFF) == 0xFFFF) + { + n0+=24; + if (ip < iend-5) + { + ip+=2; + bitStream = FSE_readLE32(ip) >> bitCount; + } + else + { + bitStream >>= 16; + bitCount+=16; + } + } + while ((bitStream & 3) == 3) + { + n0+=3; + bitStream>>=2; + bitCount+=2; + } + n0 += bitStream & 3; + bitCount += 2; + if (n0 > *maxSVPtr) return (size_t)-FSE_ERROR_maxSymbolValue_tooSmall; + while (charnum < n0) normalizedCounter[charnum++] = 0; + if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) + { + ip += bitCount>>3; + bitCount &= 7; + bitStream = FSE_readLE32(ip) >> bitCount; + } + else + bitStream >>= 2; + } + { + const short max = (short)((2*threshold-1)-remaining); + short count; + + if ((bitStream & (threshold-1)) < (U32)max) + { + count = (short)(bitStream & (threshold-1)); + bitCount += nbBits-1; + } + else + { + count = (short)(bitStream & (2*threshold-1)); + if (count >= threshold) count -= max; + bitCount += nbBits; + } + + count--; /* extra accuracy */ + remaining -= FSE_abs(count); + 
normalizedCounter[charnum++] = count; + previous0 = !count; + while (remaining < threshold) + { + nbBits--; + threshold >>= 1; + } + + { + if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) + { + ip += bitCount>>3; + bitCount &= 7; + } + else + { + bitCount -= (int)(8 * (iend - 4 - ip)); + ip = iend - 4; + } + bitStream = FSE_readLE32(ip) >> (bitCount & 31); + } + } + } + if (remaining != 1) return (size_t)-FSE_ERROR_GENERIC; + *maxSVPtr = charnum-1; + + ip += (bitCount+7)>>3; + if ((size_t)(ip-istart) > hbSize) return (size_t)-FSE_ERROR_srcSize_wrong; + return ip-istart; +} + + +/********************************************************* +* Decompression (Byte symbols) +*********************************************************/ +static size_t FSE_buildDTable_rle (FSE_DTable* dt, BYTE symbolValue) +{ + void* ptr = dt; + FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr; + FSE_decode_t* const cell = (FSE_decode_t*)(ptr) + 1; /* because dt is unsigned */ + + DTableH->tableLog = 0; + DTableH->fastMode = 0; + + cell->newState = 0; + cell->symbol = symbolValue; + cell->nbBits = 0; + + return 0; +} + + +static size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits) +{ + void* ptr = dt; + FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr; + FSE_decode_t* const dinfo = (FSE_decode_t*)(ptr) + 1; /* because dt is unsigned */ + const unsigned tableSize = 1 << nbBits; + const unsigned tableMask = tableSize - 1; + const unsigned maxSymbolValue = tableMask; + unsigned s; + + /* Sanity checks */ + if (nbBits < 1) return (size_t)-FSE_ERROR_GENERIC; /* min size */ + + /* Build Decoding Table */ + DTableH->tableLog = (U16)nbBits; + DTableH->fastMode = 1; + for (s=0; s<=maxSymbolValue; s++) + { + dinfo[s].newState = 0; + dinfo[s].symbol = (BYTE)s; + dinfo[s].nbBits = (BYTE)nbBits; + } + + return 0; +} + + +/* FSE_initDStream + * Initialize a FSE_DStream_t. + * srcBuffer must point at the beginning of an FSE block. 
+ * The function result is the size of the FSE_block (== srcSize). + * If srcSize is too small, the function will return an errorCode; + */ +static size_t FSE_initDStream(FSE_DStream_t* bitD, const void* srcBuffer, size_t srcSize) +{ + if (srcSize < 1) return (size_t)-FSE_ERROR_srcSize_wrong; + + if (srcSize >= sizeof(size_t)) + { + U32 contain32; + bitD->start = (const char*)srcBuffer; + bitD->ptr = (const char*)srcBuffer + srcSize - sizeof(size_t); + bitD->bitContainer = FSE_readLEST(bitD->ptr); + contain32 = ((const BYTE*)srcBuffer)[srcSize-1]; + if (contain32 == 0) return (size_t)-FSE_ERROR_GENERIC; /* stop bit not present */ + bitD->bitsConsumed = 8 - FSE_highbit32(contain32); + } + else + { + U32 contain32; + bitD->start = (const char*)srcBuffer; + bitD->ptr = bitD->start; + bitD->bitContainer = *(const BYTE*)(bitD->start); + switch(srcSize) + { + case 7: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[6]) << (sizeof(size_t)*8 - 16); + case 6: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[5]) << (sizeof(size_t)*8 - 24); + case 5: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[4]) << (sizeof(size_t)*8 - 32); + case 4: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[3]) << 24; + case 3: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[2]) << 16; + case 2: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[1]) << 8; + default:; + } + contain32 = ((const BYTE*)srcBuffer)[srcSize-1]; + if (contain32 == 0) return (size_t)-FSE_ERROR_GENERIC; /* stop bit not present */ + bitD->bitsConsumed = 8 - FSE_highbit32(contain32); + bitD->bitsConsumed += (U32)(sizeof(size_t) - srcSize)*8; + } + + return srcSize; +} + + +/*!FSE_lookBits + * Provides next n bits from the bitContainer. + * bitContainer is not modified (bits are still present for next read/look) + * On 32-bits, maxNbBits==25 + * On 64-bits, maxNbBits==57 + * return : value extracted. 
+ */ +static size_t FSE_lookBits(FSE_DStream_t* bitD, U32 nbBits) +{ + const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1; + return ((bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> 1) >> ((bitMask-nbBits) & bitMask); +} + +static size_t FSE_lookBitsFast(FSE_DStream_t* bitD, U32 nbBits) /* only if nbBits >= 1 !! */ +{ + const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1; + return (bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> (((bitMask+1)-nbBits) & bitMask); +} + +static void FSE_skipBits(FSE_DStream_t* bitD, U32 nbBits) +{ + bitD->bitsConsumed += nbBits; +} + + +/*!FSE_readBits + * Read next n bits from the bitContainer. + * On 32-bits, don't read more than maxNbBits==25 + * On 64-bits, don't read more than maxNbBits==57 + * Use the fast variant *only* if n >= 1. + * return : value extracted. + */ +static size_t FSE_readBits(FSE_DStream_t* bitD, U32 nbBits) +{ + size_t value = FSE_lookBits(bitD, nbBits); + FSE_skipBits(bitD, nbBits); + return value; +} + +static size_t FSE_readBitsFast(FSE_DStream_t* bitD, U32 nbBits) /* only if nbBits >= 1 !! 
*/ +{ + size_t value = FSE_lookBitsFast(bitD, nbBits); + FSE_skipBits(bitD, nbBits); + return value; +} + +static unsigned FSE_reloadDStream(FSE_DStream_t* bitD) +{ + if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8)) /* should never happen */ + return FSE_DStream_tooFar; + + if (bitD->ptr >= bitD->start + sizeof(bitD->bitContainer)) + { + bitD->ptr -= bitD->bitsConsumed >> 3; + bitD->bitsConsumed &= 7; + bitD->bitContainer = FSE_readLEST(bitD->ptr); + return FSE_DStream_unfinished; + } + if (bitD->ptr == bitD->start) + { + if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return FSE_DStream_endOfBuffer; + return FSE_DStream_completed; + } + { + U32 nbBytes = bitD->bitsConsumed >> 3; + U32 result = FSE_DStream_unfinished; + if (bitD->ptr - nbBytes < bitD->start) + { + nbBytes = (U32)(bitD->ptr - bitD->start); /* ptr > start */ + result = FSE_DStream_endOfBuffer; + } + bitD->ptr -= nbBytes; + bitD->bitsConsumed -= nbBytes*8; + bitD->bitContainer = FSE_readLEST(bitD->ptr); /* reminder : srcSize > sizeof(bitD) */ + return result; + } +} + + +static void FSE_initDState(FSE_DState_t* DStatePtr, FSE_DStream_t* bitD, const FSE_DTable* dt) +{ + const void* ptr = dt; + const FSE_DTableHeader* const DTableH = (const FSE_DTableHeader*)ptr; + DStatePtr->state = FSE_readBits(bitD, DTableH->tableLog); + FSE_reloadDStream(bitD); + DStatePtr->table = dt + 1; +} + +static BYTE FSE_decodeSymbol(FSE_DState_t* DStatePtr, FSE_DStream_t* bitD) +{ + const FSE_decode_t DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state]; + const U32 nbBits = DInfo.nbBits; + BYTE symbol = DInfo.symbol; + size_t lowBits = FSE_readBits(bitD, nbBits); + + DStatePtr->state = DInfo.newState + lowBits; + return symbol; +} + +static BYTE FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, FSE_DStream_t* bitD) +{ + const FSE_decode_t DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state]; + const U32 nbBits = DInfo.nbBits; + BYTE symbol = DInfo.symbol; + size_t lowBits = 
FSE_readBitsFast(bitD, nbBits); + + DStatePtr->state = DInfo.newState + lowBits; + return symbol; +} + +/* FSE_endOfDStream + Tells if bitD has reached end of bitStream or not */ + +static unsigned FSE_endOfDStream(const FSE_DStream_t* bitD) +{ + return ((bitD->ptr == bitD->start) && (bitD->bitsConsumed == sizeof(bitD->bitContainer)*8)); +} + +static unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr) +{ + return DStatePtr->state == 0; +} + + +FORCE_INLINE size_t FSE_decompress_usingDTable_generic( + void* dst, size_t maxDstSize, + const void* cSrc, size_t cSrcSize, + const FSE_DTable* dt, const unsigned fast) +{ + BYTE* const ostart = (BYTE*) dst; + BYTE* op = ostart; + BYTE* const omax = op + maxDstSize; + BYTE* const olimit = omax-3; + + FSE_DStream_t bitD; + FSE_DState_t state1; + FSE_DState_t state2; + size_t errorCode; + + /* Init */ + errorCode = FSE_initDStream(&bitD, cSrc, cSrcSize); /* replaced last arg by maxCompressed Size */ + if (FSE_isError(errorCode)) return errorCode; + + FSE_initDState(&state1, &bitD, dt); + FSE_initDState(&state2, &bitD, dt); + +#define FSE_GETSYMBOL(statePtr) fast ? 
FSE_decodeSymbolFast(statePtr, &bitD) : FSE_decodeSymbol(statePtr, &bitD) + + /* 4 symbols per loop */ + for ( ; (FSE_reloadDStream(&bitD)==FSE_DStream_unfinished) && (op sizeof(bitD.bitContainer)*8) /* This test must be static */ + FSE_reloadDStream(&bitD); + + op[1] = FSE_GETSYMBOL(&state2); + + if (FSE_MAX_TABLELOG*4+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */ + { if (FSE_reloadDStream(&bitD) > FSE_DStream_unfinished) { op+=2; break; } } + + op[2] = FSE_GETSYMBOL(&state1); + + if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */ + FSE_reloadDStream(&bitD); + + op[3] = FSE_GETSYMBOL(&state2); + } + + /* tail */ + /* note : FSE_reloadDStream(&bitD) >= FSE_DStream_partiallyFilled; Ends at exactly FSE_DStream_completed */ + while (1) + { + if ( (FSE_reloadDStream(&bitD)>FSE_DStream_completed) || (op==omax) || (FSE_endOfDStream(&bitD) && (fast || FSE_endOfDState(&state1))) ) + break; + + *op++ = FSE_GETSYMBOL(&state1); + + if ( (FSE_reloadDStream(&bitD)>FSE_DStream_completed) || (op==omax) || (FSE_endOfDStream(&bitD) && (fast || FSE_endOfDState(&state2))) ) + break; + + *op++ = FSE_GETSYMBOL(&state2); + } + + /* end ? 
*/ + if (FSE_endOfDStream(&bitD) && FSE_endOfDState(&state1) && FSE_endOfDState(&state2)) + return op-ostart; + + if (op==omax) return (size_t)-FSE_ERROR_dstSize_tooSmall; /* dst buffer is full, but cSrc unfinished */ + + return (size_t)-FSE_ERROR_corruptionDetected; +} + + +static size_t FSE_decompress_usingDTable(void* dst, size_t originalSize, + const void* cSrc, size_t cSrcSize, + const FSE_DTable* dt) +{ + FSE_DTableHeader DTableH; + memcpy(&DTableH, dt, sizeof(DTableH)); /* memcpy() into local variable, to avoid strict aliasing warning */ + + /* select fast mode (static) */ + if (DTableH.fastMode) return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1); + return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0); +} + + +static size_t FSE_decompress(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize) +{ + const BYTE* const istart = (const BYTE*)cSrc; + const BYTE* ip = istart; + short counting[FSE_MAX_SYMBOL_VALUE+1]; + DTable_max_t dt; /* Static analyzer seems unable to understand this table will be properly initialized later */ + unsigned tableLog; + unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE; + size_t errorCode; + + if (cSrcSize<2) return (size_t)-FSE_ERROR_srcSize_wrong; /* too small input size */ + + /* normal FSE decoding mode */ + errorCode = FSE_readNCount (counting, &maxSymbolValue, &tableLog, istart, cSrcSize); + if (FSE_isError(errorCode)) return errorCode; + if (errorCode >= cSrcSize) return (size_t)-FSE_ERROR_srcSize_wrong; /* too small input size */ + ip += errorCode; + cSrcSize -= errorCode; + + errorCode = FSE_buildDTable (dt, counting, maxSymbolValue, tableLog); + if (FSE_isError(errorCode)) return errorCode; + + /* always return, even if it is an error code */ + return FSE_decompress_usingDTable (dst, maxDstSize, ip, cSrcSize, dt); +} + + + +/* ******************************************************* +* Huff0 : Huffman block compression 
+*********************************************************/ +#define HUF_MAX_SYMBOL_VALUE 255 +#define HUF_DEFAULT_TABLELOG 12 /* used by default, when not specified */ +#define HUF_MAX_TABLELOG 12 /* max possible tableLog; for allocation purpose; can be modified */ +#define HUF_ABSOLUTEMAX_TABLELOG 16 /* absolute limit of HUF_MAX_TABLELOG. Beyond that value, code does not work */ +#if (HUF_MAX_TABLELOG > HUF_ABSOLUTEMAX_TABLELOG) +# error "HUF_MAX_TABLELOG is too large !" +#endif + +typedef struct HUF_CElt_s { + U16 val; + BYTE nbBits; +} HUF_CElt ; + +typedef struct nodeElt_s { + U32 count; + U16 parent; + BYTE byte; + BYTE nbBits; +} nodeElt; + + +/* ******************************************************* +* Huff0 : Huffman block decompression +*********************************************************/ +typedef struct { + BYTE byte; + BYTE nbBits; +} HUF_DElt; + +static size_t HUF_readDTable (U16* DTable, const void* src, size_t srcSize) +{ + BYTE huffWeight[HUF_MAX_SYMBOL_VALUE + 1]; + U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1]; /* large enough for values from 0 to 16 */ + U32 weightTotal; + U32 maxBits; + const BYTE* ip = (const BYTE*) src; + size_t iSize; + size_t oSize; + U32 n; + U32 nextRankStart; + void* ptr = DTable+1; + HUF_DElt* const dt = (HUF_DElt*)ptr; + + if (!srcSize) return (size_t)-FSE_ERROR_srcSize_wrong; + iSize = ip[0]; + + FSE_STATIC_ASSERT(sizeof(HUF_DElt) == sizeof(U16)); /* if compilation fails here, assertion is false */ + //memset(huffWeight, 0, sizeof(huffWeight)); /* should not be necessary, but some analyzer complain ... 
*/ + if (iSize >= 128) /* special header */ + { + if (iSize >= (242)) /* RLE */ + { + static int l[14] = { 1, 2, 3, 4, 7, 8, 15, 16, 31, 32, 63, 64, 127, 128 }; + oSize = l[iSize-242]; + memset(huffWeight, 1, sizeof(huffWeight)); + iSize = 0; + } + else /* Incompressible */ + { + oSize = iSize - 127; + iSize = ((oSize+1)/2); + if (iSize+1 > srcSize) return (size_t)-FSE_ERROR_srcSize_wrong; + ip += 1; + for (n=0; n> 4; + huffWeight[n+1] = ip[n/2] & 15; + } + } + } + else /* header compressed with FSE (normal case) */ + { + if (iSize+1 > srcSize) return (size_t)-FSE_ERROR_srcSize_wrong; + oSize = FSE_decompress(huffWeight, HUF_MAX_SYMBOL_VALUE, ip+1, iSize); /* max 255 values decoded, last one is implied */ + if (FSE_isError(oSize)) return oSize; + } + + /* collect weight stats */ + memset(rankVal, 0, sizeof(rankVal)); + weightTotal = 0; + for (n=0; n= HUF_ABSOLUTEMAX_TABLELOG) return (size_t)-FSE_ERROR_corruptionDetected; + rankVal[huffWeight[n]]++; + weightTotal += (1 << huffWeight[n]) >> 1; + } + if (weightTotal == 0) return (size_t)-FSE_ERROR_corruptionDetected; + + /* get last non-null symbol weight (implied, total must be 2^n) */ + maxBits = FSE_highbit32(weightTotal) + 1; + if (maxBits > DTable[0]) return (size_t)-FSE_ERROR_tableLog_tooLarge; /* DTable is too small */ + DTable[0] = (U16)maxBits; + { + U32 total = 1 << maxBits; + U32 rest = total - weightTotal; + U32 verif = 1 << FSE_highbit32(rest); + U32 lastWeight = FSE_highbit32(rest) + 1; + if (verif != rest) return (size_t)-FSE_ERROR_corruptionDetected; /* last value must be a clean power of 2 */ + huffWeight[oSize] = (BYTE)lastWeight; + rankVal[lastWeight]++; + } + + /* check tree construction validity */ + if ((rankVal[1] < 2) || (rankVal[1] & 1)) return (size_t)-FSE_ERROR_corruptionDetected; /* by construction : at least 2 elts of rank 1, must be even */ + + /* Prepare ranks */ + nextRankStart = 0; + for (n=1; n<=maxBits; n++) + { + U32 current = nextRankStart; + nextRankStart += (rankVal[n] << (n-1)); 
+ rankVal[n] = current; + } + + /* fill DTable */ + for (n=0; n<=oSize; n++) + { + const U32 w = huffWeight[n]; + const U32 length = (1 << w) >> 1; + U32 i; + HUF_DElt D; + D.byte = (BYTE)n; D.nbBits = (BYTE)(maxBits + 1 - w); + for (i = rankVal[w]; i < rankVal[w] + length; i++) + dt[i] = D; + rankVal[w] += length; + } + + return iSize+1; +} + + +static BYTE HUF_decodeSymbol(FSE_DStream_t* Dstream, const HUF_DElt* dt, const U32 dtLog) +{ + const size_t val = FSE_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */ + const BYTE c = dt[val].byte; + FSE_skipBits(Dstream, dt[val].nbBits); + return c; +} + +static size_t HUF_decompress_usingDTable( /* -3% slower when non static */ + void* dst, size_t maxDstSize, + const void* cSrc, size_t cSrcSize, + const U16* DTable) +{ + BYTE* const ostart = (BYTE*) dst; + BYTE* op = ostart; + BYTE* const omax = op + maxDstSize; + BYTE* const olimit = omax-15; + + const void* ptr = DTable; + const HUF_DElt* const dt = (const HUF_DElt*)(ptr)+1; + const U32 dtLog = DTable[0]; + size_t errorCode; + U32 reloadStatus; + + /* Init */ + + const U16* jumpTable = (const U16*)cSrc; + const size_t length1 = FSE_readLE16(jumpTable); + const size_t length2 = FSE_readLE16(jumpTable+1); + const size_t length3 = FSE_readLE16(jumpTable+2); + const size_t length4 = cSrcSize - 6 - length1 - length2 - length3; // check coherency !! 
+ const char* const start1 = (const char*)(cSrc) + 6; + const char* const start2 = start1 + length1; + const char* const start3 = start2 + length2; + const char* const start4 = start3 + length3; + FSE_DStream_t bitD1, bitD2, bitD3, bitD4; + + if (length1+length2+length3+6 >= cSrcSize) return (size_t)-FSE_ERROR_srcSize_wrong; + + errorCode = FSE_initDStream(&bitD1, start1, length1); + if (FSE_isError(errorCode)) return errorCode; + errorCode = FSE_initDStream(&bitD2, start2, length2); + if (FSE_isError(errorCode)) return errorCode; + errorCode = FSE_initDStream(&bitD3, start3, length3); + if (FSE_isError(errorCode)) return errorCode; + errorCode = FSE_initDStream(&bitD4, start4, length4); + if (FSE_isError(errorCode)) return errorCode; + + reloadStatus=FSE_reloadDStream(&bitD2); + + /* 16 symbols per loop */ + for ( ; (reloadStatus12)) FSE_reloadDStream(&Dstream) + +#define HUF_DECODE_SYMBOL_2(n, Dstream) \ + op[n] = HUF_decodeSymbol(&Dstream, dt, dtLog); \ + if (FSE_32bits()) FSE_reloadDStream(&Dstream) + + HUF_DECODE_SYMBOL_1( 0, bitD1); + HUF_DECODE_SYMBOL_1( 1, bitD2); + HUF_DECODE_SYMBOL_1( 2, bitD3); + HUF_DECODE_SYMBOL_1( 3, bitD4); + HUF_DECODE_SYMBOL_2( 4, bitD1); + HUF_DECODE_SYMBOL_2( 5, bitD2); + HUF_DECODE_SYMBOL_2( 6, bitD3); + HUF_DECODE_SYMBOL_2( 7, bitD4); + HUF_DECODE_SYMBOL_1( 8, bitD1); + HUF_DECODE_SYMBOL_1( 9, bitD2); + HUF_DECODE_SYMBOL_1(10, bitD3); + HUF_DECODE_SYMBOL_1(11, bitD4); + HUF_DECODE_SYMBOL_0(12, bitD1); + HUF_DECODE_SYMBOL_0(13, bitD2); + HUF_DECODE_SYMBOL_0(14, bitD3); + HUF_DECODE_SYMBOL_0(15, bitD4); + } + + if (reloadStatus!=FSE_DStream_completed) /* not complete : some bitStream might be FSE_DStream_unfinished */ + return (size_t)-FSE_ERROR_corruptionDetected; + + /* tail */ + { + // bitTail = bitD1; // *much* slower : -20% !??! 
+ FSE_DStream_t bitTail; + bitTail.ptr = bitD1.ptr; + bitTail.bitsConsumed = bitD1.bitsConsumed; + bitTail.bitContainer = bitD1.bitContainer; // required in case of FSE_DStream_endOfBuffer + bitTail.start = start1; + for ( ; (FSE_reloadDStream(&bitTail) < FSE_DStream_completed) && (op= cSrcSize) return (size_t)-FSE_ERROR_srcSize_wrong; + ip += errorCode; + cSrcSize -= errorCode; + + return HUF_decompress_usingDTable (dst, maxDstSize, ip, cSrcSize, DTable); +} + + +#endif /* FSE_COMMONDEFS_ONLY */ + +/* + zstd - standard compression library + Copyright (C) 2014-2015, Yann Collet. + + BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ + You can contact the author at : + - zstd source repository : https://github.com/Cyan4973/zstd + - ztsd public forum : https://groups.google.com/forum/#!forum/lz4c +*/ + +/**************************************************************** +* Tuning parameters +*****************************************************************/ +/* MEMORY_USAGE : +* Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) +* Increasing memory usage improves compression ratio +* Reduced memory usage can improve speed, due to cache effect */ +#define ZSTD_MEMORY_USAGE 17 + + +/************************************** + CPU Feature Detection +**************************************/ +/* + * Automated efficient unaligned memory access detection + * Based on known hardware architectures + * This list will be updated thanks to feedbacks + */ +#if defined(CPU_HAS_EFFICIENT_UNALIGNED_MEMORY_ACCESS) \ + || defined(__ARM_FEATURE_UNALIGNED) \ + || defined(__i386__) || defined(__x86_64__) \ + || defined(_M_IX86) || defined(_M_X64) \ + || defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_8__) \ + || (defined(_M_ARM) && (_M_ARM >= 7)) +# define ZSTD_UNALIGNED_ACCESS 1 +#else +# define ZSTD_UNALIGNED_ACCESS 0 +#endif + + +/******************************************************** +* Includes +*********************************************************/ +#include /* calloc */ +#include /* memcpy, memmove */ +#include /* debug : printf */ + + +/******************************************************** +* Compiler specifics +*********************************************************/ +#ifdef __AVX2__ +# include /* AVX2 intrinsics */ +#endif + +#ifdef _MSC_VER /* Visual Studio */ +# include /* For Visual 2005 */ +# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ +# pragma warning(disable : 4324) /* disable: C4324: padded structure */ +#endif + + +#ifndef MEM_ACCESS_MODULE +#define MEM_ACCESS_MODULE 
+/******************************************************** +* Basic Types +*********************************************************/ +#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */ +# include +typedef uint8_t BYTE; +typedef uint16_t U16; +typedef int16_t S16; +typedef uint32_t U32; +typedef int32_t S32; +typedef uint64_t U64; +#else +typedef unsigned char BYTE; +typedef unsigned short U16; +typedef signed short S16; +typedef unsigned int U32; +typedef signed int S32; +typedef unsigned long long U64; +#endif + +#endif /* MEM_ACCESS_MODULE */ + + +/******************************************************** +* Constants +*********************************************************/ +static const U32 ZSTD_magicNumber = 0xFD2FB51E; /* 3rd version : seqNb header */ + +#define HASH_LOG (ZSTD_MEMORY_USAGE - 2) +#define HASH_TABLESIZE (1 << HASH_LOG) +#define HASH_MASK (HASH_TABLESIZE - 1) + +#define KNUTH 2654435761 + +#define BIT7 128 +#define BIT6 64 +#define BIT5 32 +#define BIT4 16 + +#define KB *(1 <<10) +#define MB *(1 <<20) +#define GB *(1U<<30) + +#define BLOCKSIZE (128 KB) /* define, for static allocation */ + +#define WORKPLACESIZE (BLOCKSIZE*3) +#define MINMATCH 4 +#define MLbits 7 +#define LLbits 6 +#define Offbits 5 +#define MaxML ((1<>3]; +#else + U32 hashTable[HASH_TABLESIZE]; +#endif + BYTE buffer[WORKPLACESIZE]; +} cctxi_t; + + + + +/************************************** +* Error Management +**************************************/ +/* published entry point */ +unsigned ZSTDv01_isError(size_t code) { return ERR_isError(code); } + + +/************************************** +* Tool functions +**************************************/ +#define ZSTD_VERSION_MAJOR 0 /* for breaking interface changes */ +#define ZSTD_VERSION_MINOR 1 /* for new (non-breaking) interface capabilities */ +#define ZSTD_VERSION_RELEASE 3 /* for tweaks, bug-fixes, or development */ +#define ZSTD_VERSION_NUMBER (ZSTD_VERSION_MAJOR *100*100 + ZSTD_VERSION_MINOR *100 + 
ZSTD_VERSION_RELEASE) + +/************************************************************** +* Decompression code +**************************************************************/ + +size_t ZSTDv01_getcBlockSize(const void* src, size_t srcSize, blockProperties_t* bpPtr) +{ + const BYTE* const in = (const BYTE* const)src; + BYTE headerFlags; + U32 cSize; + + if (srcSize < 3) return ERROR(srcSize_wrong); + + headerFlags = *in; + cSize = in[2] + (in[1]<<8) + ((in[0] & 7)<<16); + + bpPtr->blockType = (blockType_t)(headerFlags >> 6); + bpPtr->origSize = (bpPtr->blockType == bt_rle) ? cSize : 0; + + if (bpPtr->blockType == bt_end) return 0; + if (bpPtr->blockType == bt_rle) return 1; + return cSize; +} + + +static size_t ZSTD_copyUncompressedBlock(void* dst, size_t maxDstSize, const void* src, size_t srcSize) +{ + if (srcSize > maxDstSize) return ERROR(dstSize_tooSmall); + memcpy(dst, src, srcSize); + return srcSize; +} + + +static size_t ZSTD_decompressLiterals(void* ctx, + void* dst, size_t maxDstSize, + const void* src, size_t srcSize) +{ + BYTE* op = (BYTE*)dst; + BYTE* const oend = op + maxDstSize; + const BYTE* ip = (const BYTE*)src; + size_t errorCode; + size_t litSize; + + /* check : minimum 2, for litSize, +1, for content */ + if (srcSize <= 3) return ERROR(corruption_detected); + + litSize = ip[1] + (ip[0]<<8); + litSize += ((ip[-3] >> 3) & 7) << 16; // mmmmh.... 
+ op = oend - litSize; + + (void)ctx; + if (litSize > maxDstSize) return ERROR(dstSize_tooSmall); + errorCode = HUF_decompress(op, litSize, ip+2, srcSize-2); + if (FSE_isError(errorCode)) return ERROR(GENERIC); + return litSize; +} + + +size_t ZSTDv01_decodeLiteralsBlock(void* ctx, + void* dst, size_t maxDstSize, + const BYTE** litStart, size_t* litSize, + const void* src, size_t srcSize) +{ + const BYTE* const istart = (const BYTE* const)src; + const BYTE* ip = istart; + BYTE* const ostart = (BYTE* const)dst; + BYTE* const oend = ostart + maxDstSize; + blockProperties_t litbp; + + size_t litcSize = ZSTDv01_getcBlockSize(src, srcSize, &litbp); + if (ZSTDv01_isError(litcSize)) return litcSize; + if (litcSize > srcSize - ZSTD_blockHeaderSize) return ERROR(srcSize_wrong); + ip += ZSTD_blockHeaderSize; + + switch(litbp.blockType) + { + case bt_raw: + *litStart = ip; + ip += litcSize; + *litSize = litcSize; + break; + case bt_rle: + { + size_t rleSize = litbp.origSize; + if (rleSize>maxDstSize) return ERROR(dstSize_tooSmall); + if (!srcSize) return ERROR(srcSize_wrong); + memset(oend - rleSize, *ip, rleSize); + *litStart = oend - rleSize; + *litSize = rleSize; + ip++; + break; + } + case bt_compressed: + { + size_t decodedLitSize = ZSTD_decompressLiterals(ctx, dst, maxDstSize, ip, litcSize); + if (ZSTDv01_isError(decodedLitSize)) return decodedLitSize; + *litStart = oend - decodedLitSize; + *litSize = decodedLitSize; + ip += litcSize; + break; + } + case bt_end: + default: + return ERROR(GENERIC); + } + + return ip-istart; +} + + +size_t ZSTDv01_decodeSeqHeaders(int* nbSeq, const BYTE** dumpsPtr, size_t* dumpsLengthPtr, + FSE_DTable* DTableLL, FSE_DTable* DTableML, FSE_DTable* DTableOffb, + const void* src, size_t srcSize) +{ + const BYTE* const istart = (const BYTE* const)src; + const BYTE* ip = istart; + const BYTE* const iend = istart + srcSize; + U32 LLtype, Offtype, MLtype; + U32 LLlog, Offlog, MLlog; + size_t dumpsLength; + + /* check */ + if (srcSize < 5) return 
ERROR(srcSize_wrong); + + /* SeqHead */ + *nbSeq = ZSTD_readLE16(ip); ip+=2; + LLtype = *ip >> 6; + Offtype = (*ip >> 4) & 3; + MLtype = (*ip >> 2) & 3; + if (*ip & 2) + { + dumpsLength = ip[2]; + dumpsLength += ip[1] << 8; + ip += 3; + } + else + { + dumpsLength = ip[1]; + dumpsLength += (ip[0] & 1) << 8; + ip += 2; + } + *dumpsPtr = ip; + ip += dumpsLength; + *dumpsLengthPtr = dumpsLength; + + /* check */ + if (ip > iend-3) return ERROR(srcSize_wrong); /* min : all 3 are "raw", hence no header, but at least xxLog bits per type */ + + /* sequences */ + { + S16 norm[MaxML+1]; /* assumption : MaxML >= MaxLL and MaxOff */ + size_t headerSize; + + /* Build DTables */ + switch(LLtype) + { + case bt_rle : + LLlog = 0; + FSE_buildDTable_rle(DTableLL, *ip++); break; + case bt_raw : + LLlog = LLbits; + FSE_buildDTable_raw(DTableLL, LLbits); break; + default : + { U32 max = MaxLL; + headerSize = FSE_readNCount(norm, &max, &LLlog, ip, iend-ip); + if (FSE_isError(headerSize)) return ERROR(GENERIC); + if (LLlog > LLFSELog) return ERROR(corruption_detected); + ip += headerSize; + FSE_buildDTable(DTableLL, norm, max, LLlog); + } } + + switch(Offtype) + { + case bt_rle : + Offlog = 0; + if (ip > iend-2) return ERROR(srcSize_wrong); /* min : "raw", hence no header, but at least xxLog bits */ + FSE_buildDTable_rle(DTableOffb, *ip++); break; + case bt_raw : + Offlog = Offbits; + FSE_buildDTable_raw(DTableOffb, Offbits); break; + default : + { U32 max = MaxOff; + headerSize = FSE_readNCount(norm, &max, &Offlog, ip, iend-ip); + if (FSE_isError(headerSize)) return ERROR(GENERIC); + if (Offlog > OffFSELog) return ERROR(corruption_detected); + ip += headerSize; + FSE_buildDTable(DTableOffb, norm, max, Offlog); + } } + + switch(MLtype) + { + case bt_rle : + MLlog = 0; + if (ip > iend-2) return ERROR(srcSize_wrong); /* min : "raw", hence no header, but at least xxLog bits */ + FSE_buildDTable_rle(DTableML, *ip++); break; + case bt_raw : + MLlog = MLbits; + FSE_buildDTable_raw(DTableML, 
MLbits); break; + default : + { U32 max = MaxML; + headerSize = FSE_readNCount(norm, &max, &MLlog, ip, iend-ip); + if (FSE_isError(headerSize)) return ERROR(GENERIC); + if (MLlog > MLFSELog) return ERROR(corruption_detected); + ip += headerSize; + FSE_buildDTable(DTableML, norm, max, MLlog); + } } } + + return ip-istart; +} + + +typedef struct { + size_t litLength; + size_t offset; + size_t matchLength; +} seq_t; + +typedef struct { + FSE_DStream_t DStream; + FSE_DState_t stateLL; + FSE_DState_t stateOffb; + FSE_DState_t stateML; + size_t prevOffset; + const BYTE* dumps; + const BYTE* dumpsEnd; +} seqState_t; + + +static void ZSTD_decodeSequence(seq_t* seq, seqState_t* seqState) +{ + size_t litLength; + size_t prevOffset; + size_t offset; + size_t matchLength; + const BYTE* dumps = seqState->dumps; + const BYTE* const de = seqState->dumpsEnd; + + /* Literal length */ + litLength = FSE_decodeSymbol(&(seqState->stateLL), &(seqState->DStream)); + prevOffset = litLength ? seq->offset : seqState->prevOffset; + seqState->prevOffset = seq->offset; + if (litLength == MaxLL) + { + U32 add = dumps 1 byte */ + dumps += 3; + } + } + } + + /* Offset */ + { + U32 offsetCode, nbBits; + offsetCode = FSE_decodeSymbol(&(seqState->stateOffb), &(seqState->DStream)); + if (ZSTD_32bits()) FSE_reloadDStream(&(seqState->DStream)); + nbBits = offsetCode - 1; + if (offsetCode==0) nbBits = 0; /* cmove */ + offset = ((size_t)1 << (nbBits & ((sizeof(offset)*8)-1))) + FSE_readBits(&(seqState->DStream), nbBits); + if (ZSTD_32bits()) FSE_reloadDStream(&(seqState->DStream)); + if (offsetCode==0) offset = prevOffset; + } + + /* MatchLength */ + matchLength = FSE_decodeSymbol(&(seqState->stateML), &(seqState->DStream)); + if (matchLength == MaxML) + { + U32 add = dumps 1 byte */ + dumps += 3; + } + } + } + matchLength += MINMATCH; + + /* save result */ + seq->litLength = litLength; + seq->offset = offset; + seq->matchLength = matchLength; + seqState->dumps = dumps; +} + + +static size_t 
ZSTD_execSequence(BYTE* op, + seq_t sequence, + const BYTE** litPtr, const BYTE* const litLimit, + BYTE* const base, BYTE* const oend) +{ + static const int dec32table[] = {0, 1, 2, 1, 4, 4, 4, 4}; /* added */ + static const int dec64table[] = {8, 8, 8, 7, 8, 9,10,11}; /* substracted */ + const BYTE* const ostart = op; + const size_t litLength = sequence.litLength; + BYTE* const endMatch = op + litLength + sequence.matchLength; /* risk : address space overflow (32-bits) */ + const BYTE* const litEnd = *litPtr + litLength; + + /* check */ + if (endMatch > oend) return ERROR(dstSize_tooSmall); /* overwrite beyond dst buffer */ + if (litEnd > litLimit) return ERROR(corruption_detected); + if (sequence.matchLength > (size_t)(*litPtr-op)) return ERROR(dstSize_tooSmall); /* overwrite literal segment */ + + /* copy Literals */ + if (((size_t)(*litPtr - op) < 8) || ((size_t)(oend-litEnd) < 8) || (op+litLength > oend-8)) + memmove(op, *litPtr, litLength); /* overwrite risk */ + else + ZSTD_wildcopy(op, *litPtr, litLength); + op += litLength; + *litPtr = litEnd; /* update for next sequence */ + + /* check : last match must be at a minimum distance of 8 from end of dest buffer */ + if (oend-op < 8) return ERROR(dstSize_tooSmall); + + /* copy Match */ + { + const U32 overlapRisk = (((size_t)(litEnd - endMatch)) < 12); + const BYTE* match = op - sequence.offset; /* possible underflow at op - offset ? 
*/ + size_t qutt = 12; + U64 saved[2]; + + /* check */ + if (match < base) return ERROR(corruption_detected); + if (sequence.offset > (size_t)base) return ERROR(corruption_detected); + + /* save beginning of literal sequence, in case of write overlap */ + if (overlapRisk) + { + if ((endMatch + qutt) > oend) qutt = oend-endMatch; + memcpy(saved, endMatch, qutt); + } + + if (sequence.offset < 8) + { + const int dec64 = dec64table[sequence.offset]; + op[0] = match[0]; + op[1] = match[1]; + op[2] = match[2]; + op[3] = match[3]; + match += dec32table[sequence.offset]; + ZSTD_copy4(op+4, match); + match -= dec64; + } else { ZSTD_copy8(op, match); } + op += 8; match += 8; + + if (endMatch > oend-(16-MINMATCH)) + { + if (op < oend-8) + { + ZSTD_wildcopy(op, match, (oend-8) - op); + match += (oend-8) - op; + op = oend-8; + } + while (opLLTable; + U32* DTableML = dctx->MLTable; + U32* DTableOffb = dctx->OffTable; + BYTE* const base = (BYTE*) (dctx->base); + + /* Build Decoding Tables */ + errorCode = ZSTDv01_decodeSeqHeaders(&nbSeq, &dumps, &dumpsLength, + DTableLL, DTableML, DTableOffb, + ip, iend-ip); + if (ZSTDv01_isError(errorCode)) return errorCode; + ip += errorCode; + + /* Regen sequences */ + { + seq_t sequence; + seqState_t seqState; + + memset(&sequence, 0, sizeof(sequence)); + seqState.dumps = dumps; + seqState.dumpsEnd = dumps + dumpsLength; + seqState.prevOffset = 1; + errorCode = FSE_initDStream(&(seqState.DStream), ip, iend-ip); + if (FSE_isError(errorCode)) return ERROR(corruption_detected); + FSE_initDState(&(seqState.stateLL), &(seqState.DStream), DTableLL); + FSE_initDState(&(seqState.stateOffb), &(seqState.DStream), DTableOffb); + FSE_initDState(&(seqState.stateML), &(seqState.DStream), DTableML); + + for ( ; (FSE_reloadDStream(&(seqState.DStream)) <= FSE_DStream_completed) && (nbSeq>0) ; ) + { + size_t oneSeqSize; + nbSeq--; + ZSTD_decodeSequence(&sequence, &seqState); + oneSeqSize = ZSTD_execSequence(op, sequence, &litPtr, litEnd, base, oend); + if 
(ZSTDv01_isError(oneSeqSize)) return oneSeqSize; + op += oneSeqSize; + } + + /* check if reached exact end */ + if ( !FSE_endOfDStream(&(seqState.DStream)) ) return ERROR(corruption_detected); /* requested too much : data is corrupted */ + if (nbSeq<0) return ERROR(corruption_detected); /* requested too many sequences : data is corrupted */ + + /* last literal segment */ + { + size_t lastLLSize = litEnd - litPtr; + if (op+lastLLSize > oend) return ERROR(dstSize_tooSmall); + if (op != litPtr) memmove(op, litPtr, lastLLSize); + op += lastLLSize; + } + } + + return op-ostart; +} + + +static size_t ZSTD_decompressBlock( + void* ctx, + void* dst, size_t maxDstSize, + const void* src, size_t srcSize) +{ + /* blockType == blockCompressed, srcSize is trusted */ + const BYTE* ip = (const BYTE*)src; + const BYTE* litPtr = NULL; + size_t litSize = 0; + size_t errorCode; + + /* Decode literals sub-block */ + errorCode = ZSTDv01_decodeLiteralsBlock(ctx, dst, maxDstSize, &litPtr, &litSize, src, srcSize); + if (ZSTDv01_isError(errorCode)) return errorCode; + ip += errorCode; + srcSize -= errorCode; + + return ZSTD_decompressSequences(ctx, dst, maxDstSize, ip, srcSize, litPtr, litSize); +} + + +size_t ZSTDv01_decompressDCtx(void* ctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize) +{ + const BYTE* ip = (const BYTE*)src; + const BYTE* iend = ip + srcSize; + BYTE* const ostart = (BYTE* const)dst; + BYTE* op = ostart; + BYTE* const oend = ostart + maxDstSize; + size_t remainingSize = srcSize; + U32 magicNumber; + size_t errorCode=0; + blockProperties_t blockProperties; + + /* Frame Header */ + if (srcSize < ZSTD_frameHeaderSize+ZSTD_blockHeaderSize) return ERROR(srcSize_wrong); + magicNumber = ZSTD_readBE32(src); + if (magicNumber != ZSTD_magicNumber) return ERROR(prefix_unknown); + ip += ZSTD_frameHeaderSize; remainingSize -= ZSTD_frameHeaderSize; + + /* Loop on each block */ + while (1) + { + size_t blockSize = ZSTDv01_getcBlockSize(ip, iend-ip, &blockProperties); + 
if (ZSTDv01_isError(blockSize)) return blockSize; + + ip += ZSTD_blockHeaderSize; + remainingSize -= ZSTD_blockHeaderSize; + if (blockSize > remainingSize) return ERROR(srcSize_wrong); + + switch(blockProperties.blockType) + { + case bt_compressed: + errorCode = ZSTD_decompressBlock(ctx, op, oend-op, ip, blockSize); + break; + case bt_raw : + errorCode = ZSTD_copyUncompressedBlock(op, oend-op, ip, blockSize); + break; + case bt_rle : + return ERROR(GENERIC); /* not yet supported */ + break; + case bt_end : + /* end of frame */ + if (remainingSize) return ERROR(srcSize_wrong); + break; + default: + return ERROR(GENERIC); + } + if (blockSize == 0) break; /* bt_end */ + + if (ZSTDv01_isError(errorCode)) return errorCode; + op += errorCode; + ip += blockSize; + remainingSize -= blockSize; + } + + return op-ostart; +} + +size_t ZSTDv01_decompress(void* dst, size_t maxDstSize, const void* src, size_t srcSize) +{ + dctx_t ctx; + ctx.base = dst; + return ZSTDv01_decompressDCtx(&ctx, dst, maxDstSize, src, srcSize); +} + +size_t ZSTDv01_findFrameCompressedSize(const void* src, size_t srcSize) +{ + const BYTE* ip = (const BYTE*)src; + size_t remainingSize = srcSize; + U32 magicNumber; + blockProperties_t blockProperties; + + /* Frame Header */ + if (srcSize < ZSTD_frameHeaderSize+ZSTD_blockHeaderSize) return ERROR(srcSize_wrong); + magicNumber = ZSTD_readBE32(src); + if (magicNumber != ZSTD_magicNumber) return ERROR(prefix_unknown); + ip += ZSTD_frameHeaderSize; remainingSize -= ZSTD_frameHeaderSize; + + /* Loop on each block */ + while (1) + { + size_t blockSize = ZSTDv01_getcBlockSize(ip, remainingSize, &blockProperties); + if (ZSTDv01_isError(blockSize)) return blockSize; + + ip += ZSTD_blockHeaderSize; + remainingSize -= ZSTD_blockHeaderSize; + if (blockSize > remainingSize) return ERROR(srcSize_wrong); + + if (blockSize == 0) break; /* bt_end */ + + ip += blockSize; + remainingSize -= blockSize; + } + + return ip - (const BYTE*)src; +} + +/******************************* 
+* Streaming Decompression API +*******************************/ + +size_t ZSTDv01_resetDCtx(ZSTDv01_Dctx* dctx) +{ + dctx->expected = ZSTD_frameHeaderSize; + dctx->phase = 0; + dctx->previousDstEnd = NULL; + dctx->base = NULL; + return 0; +} + +ZSTDv01_Dctx* ZSTDv01_createDCtx(void) +{ + ZSTDv01_Dctx* dctx = (ZSTDv01_Dctx*)malloc(sizeof(ZSTDv01_Dctx)); + if (dctx==NULL) return NULL; + ZSTDv01_resetDCtx(dctx); + return dctx; +} + +size_t ZSTDv01_freeDCtx(ZSTDv01_Dctx* dctx) +{ + free(dctx); + return 0; +} + +size_t ZSTDv01_nextSrcSizeToDecompress(ZSTDv01_Dctx* dctx) +{ + return ((dctx_t*)dctx)->expected; +} + +size_t ZSTDv01_decompressContinue(ZSTDv01_Dctx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize) +{ + dctx_t* ctx = (dctx_t*)dctx; + + /* Sanity check */ + if (srcSize != ctx->expected) return ERROR(srcSize_wrong); + if (dst != ctx->previousDstEnd) /* not contiguous */ + ctx->base = dst; + + /* Decompress : frame header */ + if (ctx->phase == 0) + { + /* Check frame magic header */ + U32 magicNumber = ZSTD_readBE32(src); + if (magicNumber != ZSTD_magicNumber) return ERROR(prefix_unknown); + ctx->phase = 1; + ctx->expected = ZSTD_blockHeaderSize; + return 0; + } + + /* Decompress : block header */ + if (ctx->phase == 1) + { + blockProperties_t bp; + size_t blockSize = ZSTDv01_getcBlockSize(src, ZSTD_blockHeaderSize, &bp); + if (ZSTDv01_isError(blockSize)) return blockSize; + if (bp.blockType == bt_end) + { + ctx->expected = 0; + ctx->phase = 0; + } + else + { + ctx->expected = blockSize; + ctx->bType = bp.blockType; + ctx->phase = 2; + } + + return 0; + } + + /* Decompress : block content */ + { + size_t rSize; + switch(ctx->bType) + { + case bt_compressed: + rSize = ZSTD_decompressBlock(ctx, dst, maxDstSize, src, srcSize); + break; + case bt_raw : + rSize = ZSTD_copyUncompressedBlock(dst, maxDstSize, src, srcSize); + break; + case bt_rle : + return ERROR(GENERIC); /* not yet handled */ + break; + case bt_end : /* should never happen 
(filtered at phase 1) */ + rSize = 0; + break; + default: + return ERROR(GENERIC); + } + ctx->phase = 1; + ctx->expected = ZSTD_blockHeaderSize; + ctx->previousDstEnd = (void*)( ((char*)dst) + rSize); + return rSize; + } + +} diff --git a/vendor/github.com/DataDog/zstd/zstd_v01.h b/vendor/github.com/DataDog/zstd/zstd_v01.h new file mode 100644 index 00000000000..42f0897c7d2 --- /dev/null +++ b/vendor/github.com/DataDog/zstd/zstd_v01.h @@ -0,0 +1,89 @@ +/* + * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + +#ifndef ZSTD_V01_H_28739879432 +#define ZSTD_V01_H_28739879432 + +#if defined (__cplusplus) +extern "C" { +#endif + +/* ************************************* +* Includes +***************************************/ +#include /* size_t */ + + +/* ************************************* +* Simple one-step function +***************************************/ +/** +ZSTDv01_decompress() : decompress ZSTD frames compliant with v0.1.x format + compressedSize : is the exact source size + maxOriginalSize : is the size of the 'dst' buffer, which must be already allocated. + It must be equal or larger than originalSize, otherwise decompression will fail. 
+ return : the number of bytes decompressed into destination buffer (originalSize) + or an errorCode if it fails (which can be tested using ZSTDv01_isError()) +*/ +size_t ZSTDv01_decompress( void* dst, size_t maxOriginalSize, + const void* src, size_t compressedSize); + +/** +ZSTDv01_getFrameSrcSize() : get the source length of a ZSTD frame compliant with v0.1.x format + compressedSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src' + return : the number of bytes that would be read to decompress this frame + or an errorCode if it fails (which can be tested using ZSTDv01_isError()) +*/ +size_t ZSTDv01_findFrameCompressedSize(const void* src, size_t compressedSize); + +/** +ZSTDv01_isError() : tells if the result of ZSTDv01_decompress() is an error +*/ +unsigned ZSTDv01_isError(size_t code); + + +/* ************************************* +* Advanced functions +***************************************/ +typedef struct ZSTDv01_Dctx_s ZSTDv01_Dctx; +ZSTDv01_Dctx* ZSTDv01_createDCtx(void); +size_t ZSTDv01_freeDCtx(ZSTDv01_Dctx* dctx); + +size_t ZSTDv01_decompressDCtx(void* ctx, + void* dst, size_t maxOriginalSize, + const void* src, size_t compressedSize); + +/* ************************************* +* Streaming functions +***************************************/ +size_t ZSTDv01_resetDCtx(ZSTDv01_Dctx* dctx); + +size_t ZSTDv01_nextSrcSizeToDecompress(ZSTDv01_Dctx* dctx); +size_t ZSTDv01_decompressContinue(ZSTDv01_Dctx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize); +/** + Use above functions alternatively. + ZSTD_nextSrcSizeToDecompress() tells how much bytes to provide as 'srcSize' to ZSTD_decompressContinue(). + ZSTD_decompressContinue() will use previous data blocks to improve compression if they are located prior to current block. + Result is the number of bytes regenerated within 'dst'. + It can be zero, which is not an error; it just means ZSTD_decompressContinue() has decoded some header. 
+*/ + +/* ************************************* +* Prefix - version detection +***************************************/ +#define ZSTDv01_magicNumber 0xFD2FB51E /* Big Endian version */ +#define ZSTDv01_magicNumberLE 0x1EB52FFD /* Little Endian version */ + + +#if defined (__cplusplus) +} +#endif + +#endif /* ZSTD_V01_H_28739879432 */ diff --git a/vendor/github.com/DataDog/zstd/zstd_v02.c b/vendor/github.com/DataDog/zstd/zstd_v02.c new file mode 100644 index 00000000000..8bc0eceeda8 --- /dev/null +++ b/vendor/github.com/DataDog/zstd/zstd_v02.c @@ -0,0 +1,3483 @@ +/* + * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + + +#include /* size_t, ptrdiff_t */ +#include "zstd_v02.h" +#include "error_private.h" + + +/****************************************** +* Compiler-specific +******************************************/ +#if defined(_MSC_VER) /* Visual Studio */ +# include /* _byteswap_ulong */ +# include /* _byteswap_* */ +#endif + + +/* ****************************************************************** + mem.h + low-level memory access routines + Copyright (C) 2013-2015, Yann Collet. + + BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + You can contact the author at : + - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy + - Public forum : https://groups.google.com/forum/#!forum/lz4c +****************************************************************** */ +#ifndef MEM_H_MODULE +#define MEM_H_MODULE + +#if defined (__cplusplus) +extern "C" { +#endif + +/****************************************** +* Includes +******************************************/ +#include /* size_t, ptrdiff_t */ +#include /* memcpy */ + + +/****************************************** +* Compiler-specific +******************************************/ +#if defined(__GNUC__) +# define MEM_STATIC static __attribute__((unused)) +#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) +# define MEM_STATIC static inline +#elif defined(_MSC_VER) +# define MEM_STATIC static __inline +#else +# define MEM_STATIC static /* this version may generate warnings for unused static functions; 
disable the relevant warning */ +#endif + + +/**************************************************************** +* Basic Types +*****************************************************************/ +#if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) +# include + typedef uint8_t BYTE; + typedef uint16_t U16; + typedef int16_t S16; + typedef uint32_t U32; + typedef int32_t S32; + typedef uint64_t U64; + typedef int64_t S64; +#else + typedef unsigned char BYTE; + typedef unsigned short U16; + typedef signed short S16; + typedef unsigned int U32; + typedef signed int S32; + typedef unsigned long long U64; + typedef signed long long S64; +#endif + + +/**************************************************************** +* Memory I/O +*****************************************************************/ +/* MEM_FORCE_MEMORY_ACCESS + * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable. + * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal. + * The below switch allow to select different access method for improved performance. + * Method 0 (default) : use `memcpy()`. Safe and portable. + * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable). + * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`. + * Method 2 : direct access. This method is portable but violate C standard. + * It can generate buggy code on targets generating assembly depending on alignment. + * But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6) + * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details. 
+ * Prefer these methods in priority order (0 > 1 > 2) + */ +#ifndef MEM_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */ +# if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) ) +# define MEM_FORCE_MEMORY_ACCESS 2 +# elif (defined(__INTEL_COMPILER) && !defined(WIN32)) || \ + (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) )) +# define MEM_FORCE_MEMORY_ACCESS 1 +# endif +#endif + +MEM_STATIC unsigned MEM_32bits(void) { return sizeof(void*)==4; } +MEM_STATIC unsigned MEM_64bits(void) { return sizeof(void*)==8; } + +MEM_STATIC unsigned MEM_isLittleEndian(void) +{ + const union { U32 u; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */ + return one.c[0]; +} + +#if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2) + +/* violates C standard on structure alignment. 
+Only use if no other choice to achieve best performance on target platform */ +MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; } +MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; } +MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; } + +MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; } + +#elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1) + +/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ +/* currently only defined for gcc and icc */ +typedef union { U16 u16; U32 u32; U64 u64; } __attribute__((packed)) unalign; + +MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign*)ptr)->u16; } +MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign*)ptr)->u32; } +MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign*)ptr)->u64; } + +MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; } + +#else + +/* default method, safe and standard. 
+ can sometimes prove slower */ + +MEM_STATIC U16 MEM_read16(const void* memPtr) +{ + U16 val; memcpy(&val, memPtr, sizeof(val)); return val; +} + +MEM_STATIC U32 MEM_read32(const void* memPtr) +{ + U32 val; memcpy(&val, memPtr, sizeof(val)); return val; +} + +MEM_STATIC U64 MEM_read64(const void* memPtr) +{ + U64 val; memcpy(&val, memPtr, sizeof(val)); return val; +} + +MEM_STATIC void MEM_write16(void* memPtr, U16 value) +{ + memcpy(memPtr, &value, sizeof(value)); +} + +#endif // MEM_FORCE_MEMORY_ACCESS + + +MEM_STATIC U16 MEM_readLE16(const void* memPtr) +{ + if (MEM_isLittleEndian()) + return MEM_read16(memPtr); + else + { + const BYTE* p = (const BYTE*)memPtr; + return (U16)(p[0] + (p[1]<<8)); + } +} + +MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val) +{ + if (MEM_isLittleEndian()) + { + MEM_write16(memPtr, val); + } + else + { + BYTE* p = (BYTE*)memPtr; + p[0] = (BYTE)val; + p[1] = (BYTE)(val>>8); + } +} + +MEM_STATIC U32 MEM_readLE32(const void* memPtr) +{ + if (MEM_isLittleEndian()) + return MEM_read32(memPtr); + else + { + const BYTE* p = (const BYTE*)memPtr; + return (U32)((U32)p[0] + ((U32)p[1]<<8) + ((U32)p[2]<<16) + ((U32)p[3]<<24)); + } +} + + +MEM_STATIC U64 MEM_readLE64(const void* memPtr) +{ + if (MEM_isLittleEndian()) + return MEM_read64(memPtr); + else + { + const BYTE* p = (const BYTE*)memPtr; + return (U64)((U64)p[0] + ((U64)p[1]<<8) + ((U64)p[2]<<16) + ((U64)p[3]<<24) + + ((U64)p[4]<<32) + ((U64)p[5]<<40) + ((U64)p[6]<<48) + ((U64)p[7]<<56)); + } +} + + +MEM_STATIC size_t MEM_readLEST(const void* memPtr) +{ + if (MEM_32bits()) + return (size_t)MEM_readLE32(memPtr); + else + return (size_t)MEM_readLE64(memPtr); +} + +#if defined (__cplusplus) +} +#endif + +#endif /* MEM_H_MODULE */ + + +/* ****************************************************************** + bitstream + Part of NewGen Entropy library + header file (to include) + Copyright (C) 2013-2015, Yann Collet. 
+ + BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + You can contact the author at : + - Source repository : https://github.com/Cyan4973/FiniteStateEntropy + - Public forum : https://groups.google.com/forum/#!forum/lz4c +****************************************************************** */ +#ifndef BITSTREAM_H_MODULE +#define BITSTREAM_H_MODULE + +#if defined (__cplusplus) +extern "C" { +#endif + + +/* +* This API consists of small unitary functions, which highly benefit from being inlined. +* Since link-time-optimization is not available for all compilers, +* these functions are defined into a .h to be included. 
+*/ + + +/********************************************** +* bitStream decompression API (read backward) +**********************************************/ +typedef struct +{ + size_t bitContainer; + unsigned bitsConsumed; + const char* ptr; + const char* start; +} BIT_DStream_t; + +typedef enum { BIT_DStream_unfinished = 0, + BIT_DStream_endOfBuffer = 1, + BIT_DStream_completed = 2, + BIT_DStream_overflow = 3 } BIT_DStream_status; /* result of BIT_reloadDStream() */ + /* 1,2,4,8 would be better for bitmap combinations, but slows down performance a bit ... :( */ + +MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize); +MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits); +MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD); +MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* bitD); + + +/****************************************** +* unsafe API +******************************************/ +MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits); +/* faster, but works only if nbBits >= 1 */ + + + +/**************************************************************** +* Helper functions +****************************************************************/ +MEM_STATIC unsigned BIT_highbit32 (U32 val) +{ +# if defined(_MSC_VER) /* Visual */ + unsigned long r=0; + _BitScanReverse ( &r, val ); + return (unsigned) r; +# elif defined(__GNUC__) && (__GNUC__ >= 3) /* Use GCC Intrinsic */ + return 31 - __builtin_clz (val); +# else /* Software version */ + static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 }; + U32 v = val; + unsigned r; + v |= v >> 1; + v |= v >> 2; + v |= v >> 4; + v |= v >> 8; + v |= v >> 16; + r = DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27]; + return r; +# endif +} + + + +/********************************************************** +* bitStream decoding 
+**********************************************************/ + +/*!BIT_initDStream +* Initialize a BIT_DStream_t. +* @bitD : a pointer to an already allocated BIT_DStream_t structure +* @srcBuffer must point at the beginning of a bitStream +* @srcSize must be the exact size of the bitStream +* @result : size of stream (== srcSize) or an errorCode if a problem is detected +*/ +MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize) +{ + if (srcSize < 1) { memset(bitD, 0, sizeof(*bitD)); return ERROR(srcSize_wrong); } + + if (srcSize >= sizeof(size_t)) /* normal case */ + { + U32 contain32; + bitD->start = (const char*)srcBuffer; + bitD->ptr = (const char*)srcBuffer + srcSize - sizeof(size_t); + bitD->bitContainer = MEM_readLEST(bitD->ptr); + contain32 = ((const BYTE*)srcBuffer)[srcSize-1]; + if (contain32 == 0) return ERROR(GENERIC); /* endMark not present */ + bitD->bitsConsumed = 8 - BIT_highbit32(contain32); + } + else + { + U32 contain32; + bitD->start = (const char*)srcBuffer; + bitD->ptr = bitD->start; + bitD->bitContainer = *(const BYTE*)(bitD->start); + switch(srcSize) + { + case 7: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[6]) << (sizeof(size_t)*8 - 16); + case 6: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[5]) << (sizeof(size_t)*8 - 24); + case 5: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[4]) << (sizeof(size_t)*8 - 32); + case 4: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[3]) << 24; + case 3: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[2]) << 16; + case 2: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[1]) << 8; + default:; + } + contain32 = ((const BYTE*)srcBuffer)[srcSize-1]; + if (contain32 == 0) return ERROR(GENERIC); /* endMark not present */ + bitD->bitsConsumed = 8 - BIT_highbit32(contain32); + bitD->bitsConsumed += (U32)(sizeof(size_t) - srcSize)*8; + } + + return srcSize; +} + +MEM_STATIC size_t 
BIT_lookBits(BIT_DStream_t* bitD, U32 nbBits) +{ + const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1; + return ((bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> 1) >> ((bitMask-nbBits) & bitMask); +} + +/*! BIT_lookBitsFast : +* unsafe version; only works only if nbBits >= 1 */ +MEM_STATIC size_t BIT_lookBitsFast(BIT_DStream_t* bitD, U32 nbBits) +{ + const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1; + return (bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> (((bitMask+1)-nbBits) & bitMask); +} + +MEM_STATIC void BIT_skipBits(BIT_DStream_t* bitD, U32 nbBits) +{ + bitD->bitsConsumed += nbBits; +} + +MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, U32 nbBits) +{ + size_t value = BIT_lookBits(bitD, nbBits); + BIT_skipBits(bitD, nbBits); + return value; +} + +/*!BIT_readBitsFast : +* unsafe version; only works only if nbBits >= 1 */ +MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, U32 nbBits) +{ + size_t value = BIT_lookBitsFast(bitD, nbBits); + BIT_skipBits(bitD, nbBits); + return value; +} + +MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD) +{ + if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8)) /* should never happen */ + return BIT_DStream_overflow; + + if (bitD->ptr >= bitD->start + sizeof(bitD->bitContainer)) + { + bitD->ptr -= bitD->bitsConsumed >> 3; + bitD->bitsConsumed &= 7; + bitD->bitContainer = MEM_readLEST(bitD->ptr); + return BIT_DStream_unfinished; + } + if (bitD->ptr == bitD->start) + { + if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return BIT_DStream_endOfBuffer; + return BIT_DStream_completed; + } + { + U32 nbBytes = bitD->bitsConsumed >> 3; + BIT_DStream_status result = BIT_DStream_unfinished; + if (bitD->ptr - nbBytes < bitD->start) + { + nbBytes = (U32)(bitD->ptr - bitD->start); /* ptr > start */ + result = BIT_DStream_endOfBuffer; + } + bitD->ptr -= nbBytes; + bitD->bitsConsumed -= nbBytes*8; + bitD->bitContainer = MEM_readLEST(bitD->ptr); /* reminder : srcSize > sizeof(bitD) 
*/ + return result; + } +} + +/*! BIT_endOfDStream +* @return Tells if DStream has reached its exact end +*/ +MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* DStream) +{ + return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer)*8)); +} + +#if defined (__cplusplus) +} +#endif + +#endif /* BITSTREAM_H_MODULE */ +/* ****************************************************************** + Error codes and messages + Copyright (C) 2013-2015, Yann Collet + + BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ + You can contact the author at : + - Source repository : https://github.com/Cyan4973/FiniteStateEntropy + - Public forum : https://groups.google.com/forum/#!forum/lz4c +****************************************************************** */ +#ifndef ERROR_H_MODULE +#define ERROR_H_MODULE + +#if defined (__cplusplus) +extern "C" { +#endif + + +/****************************************** +* Compiler-specific +******************************************/ +#if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) +# define ERR_STATIC static inline +#elif defined(_MSC_VER) +# define ERR_STATIC static __inline +#elif defined(__GNUC__) +# define ERR_STATIC static __attribute__((unused)) +#else +# define ERR_STATIC static /* this version may generate warnings for unused static functions; disable the relevant warning */ +#endif + + +/****************************************** +* Error Management +******************************************/ +#define PREFIX(name) ZSTD_error_##name + +#define ERROR(name) (size_t)-PREFIX(name) + +#define ERROR_LIST(ITEM) \ + ITEM(PREFIX(No_Error)) ITEM(PREFIX(GENERIC)) \ + ITEM(PREFIX(dstSize_tooSmall)) ITEM(PREFIX(srcSize_wrong)) \ + ITEM(PREFIX(prefix_unknown)) ITEM(PREFIX(corruption_detected)) \ + ITEM(PREFIX(tableLog_tooLarge)) ITEM(PREFIX(maxSymbolValue_tooLarge)) ITEM(PREFIX(maxSymbolValue_tooSmall)) \ + ITEM(PREFIX(maxCode)) + +#define ERROR_GENERATE_ENUM(ENUM) ENUM, +typedef enum { ERROR_LIST(ERROR_GENERATE_ENUM) } ERR_codes; /* enum is exposed, to detect & handle specific errors; compare function result to -enum value */ + +#define ERROR_CONVERTTOSTRING(STRING) #STRING, +#define ERROR_GENERATE_STRING(EXPR) ERROR_CONVERTTOSTRING(EXPR) +static const char* ERR_strings[] = { ERROR_LIST(ERROR_GENERATE_STRING) }; + +ERR_STATIC unsigned ERR_isError(size_t code) { return (code > ERROR(maxCode)); } + +ERR_STATIC const char* ERR_getErrorName(size_t code) +{ + static const char* codeError = 
"Unspecified error code"; + if (ERR_isError(code)) return ERR_strings[-(int)(code)]; + return codeError; +} + + +#if defined (__cplusplus) +} +#endif + +#endif /* ERROR_H_MODULE */ +/* +Constructor and Destructor of type FSE_CTable + Note that its size depends on 'tableLog' and 'maxSymbolValue' */ +typedef unsigned FSE_CTable; /* don't allocate that. It's just a way to be more restrictive than void* */ +typedef unsigned FSE_DTable; /* don't allocate that. It's just a way to be more restrictive than void* */ + + +/* ****************************************************************** + FSE : Finite State Entropy coder + header file for static linking (only) + Copyright (C) 2013-2015, Yann Collet + + BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + You can contact the author at : + - Source repository : https://github.com/Cyan4973/FiniteStateEntropy + - Public forum : https://groups.google.com/forum/#!forum/lz4c +****************************************************************** */ +#if defined (__cplusplus) +extern "C" { +#endif + + +/****************************************** +* Static allocation +******************************************/ +/* FSE buffer bounds */ +#define FSE_NCOUNTBOUND 512 +#define FSE_BLOCKBOUND(size) (size + (size>>7)) +#define FSE_COMPRESSBOUND(size) (FSE_NCOUNTBOUND + FSE_BLOCKBOUND(size)) /* Macro version, useful for static allocation */ + +/* You can statically allocate FSE CTable/DTable as a table of unsigned using below macro */ +#define FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) (1 + (1<<(maxTableLog-1)) + ((maxSymbolValue+1)*2)) +#define FSE_DTABLE_SIZE_U32(maxTableLog) (1 + (1<= 1 (otherwise, result will be corrupted) */ + + +/****************************************** +* Implementation of inline functions +******************************************/ + +/* decompression */ + +typedef struct { + U16 tableLog; + U16 fastMode; +} FSE_DTableHeader; /* sizeof U32 */ + +typedef struct +{ + unsigned short newState; + unsigned char symbol; + unsigned char nbBits; +} FSE_decode_t; /* size == U32 */ + +MEM_STATIC void FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt) +{ + FSE_DTableHeader DTableH; + memcpy(&DTableH, 
dt, sizeof(DTableH)); + DStatePtr->state = BIT_readBits(bitD, DTableH.tableLog); + BIT_reloadDStream(bitD); + DStatePtr->table = dt + 1; +} + +MEM_STATIC BYTE FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD) +{ + const FSE_decode_t DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state]; + const U32 nbBits = DInfo.nbBits; + BYTE symbol = DInfo.symbol; + size_t lowBits = BIT_readBits(bitD, nbBits); + + DStatePtr->state = DInfo.newState + lowBits; + return symbol; +} + +MEM_STATIC BYTE FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD) +{ + const FSE_decode_t DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state]; + const U32 nbBits = DInfo.nbBits; + BYTE symbol = DInfo.symbol; + size_t lowBits = BIT_readBitsFast(bitD, nbBits); + + DStatePtr->state = DInfo.newState + lowBits; + return symbol; +} + +MEM_STATIC unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr) +{ + return DStatePtr->state == 0; +} + + +#if defined (__cplusplus) +} +#endif +/* ****************************************************************** + Huff0 : Huffman coder, part of New Generation Entropy library + header file for static linking (only) + Copyright (C) 2013-2015, Yann Collet + + BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + You can contact the author at : + - Source repository : https://github.com/Cyan4973/FiniteStateEntropy + - Public forum : https://groups.google.com/forum/#!forum/lz4c +****************************************************************** */ + +#if defined (__cplusplus) +extern "C" { +#endif + +/****************************************** +* Static allocation macros +******************************************/ +/* Huff0 buffer bounds */ +#define HUF_CTABLEBOUND 129 +#define HUF_BLOCKBOUND(size) (size + (size>>8) + 8) /* only true if incompressible pre-filtered with fast heuristic */ +#define HUF_COMPRESSBOUND(size) (HUF_CTABLEBOUND + HUF_BLOCKBOUND(size)) /* Macro version, useful for static allocation */ + +/* static allocation of Huff0's DTable */ +#define HUF_DTABLE_SIZE(maxTableLog) (1 + (1< /* size_t */ + + +/* ************************************* +* Version +***************************************/ +#define ZSTD_VERSION_MAJOR 0 /* for breaking interface changes */ +#define ZSTD_VERSION_MINOR 2 /* for new (non-breaking) interface capabilities */ +#define ZSTD_VERSION_RELEASE 2 /* for tweaks, bug-fixes, or development */ +#define ZSTD_VERSION_NUMBER (ZSTD_VERSION_MAJOR *100*100 + 
ZSTD_VERSION_MINOR *100 + ZSTD_VERSION_RELEASE) + + +/* ************************************* +* Advanced functions +***************************************/ +typedef struct ZSTD_CCtx_s ZSTD_CCtx; /* incomplete type */ + +#if defined (__cplusplus) +} +#endif +/* + zstd - standard compression library + Header File for static linking only + Copyright (C) 2014-2015, Yann Collet. + + BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + You can contact the author at : + - zstd source repository : https://github.com/Cyan4973/zstd + - ztsd public forum : https://groups.google.com/forum/#!forum/lz4c +*/ + +/* The objects defined into this file should be considered experimental. 
+ * They are not labelled stable, as their prototype may change in the future. + * You can use them for tests, provide feedback, or if you can endure risk of future changes. + */ + +#if defined (__cplusplus) +extern "C" { +#endif + +/* ************************************* +* Streaming functions +***************************************/ + +typedef struct ZSTD_DCtx_s ZSTD_DCtx; + +/* + Use above functions alternatively. + ZSTD_nextSrcSizeToDecompress() tells how much bytes to provide as 'srcSize' to ZSTD_decompressContinue(). + ZSTD_decompressContinue() will use previous data blocks to improve compression if they are located prior to current block. + Result is the number of bytes regenerated within 'dst'. + It can be zero, which is not an error; it just means ZSTD_decompressContinue() has decoded some header. +*/ + +/* ************************************* +* Prefix - version detection +***************************************/ +#define ZSTD_magicNumber 0xFD2FB522 /* v0.2 (current)*/ + + +#if defined (__cplusplus) +} +#endif +/* ****************************************************************** + FSE : Finite State Entropy coder + Copyright (C) 2013-2015, Yann Collet. + + BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + You can contact the author at : + - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy + - Public forum : https://groups.google.com/forum/#!forum/lz4c +****************************************************************** */ + +#ifndef FSE_COMMONDEFS_ONLY + +/**************************************************************** +* Tuning parameters +****************************************************************/ +/* MEMORY_USAGE : +* Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) +* Increasing memory usage improves compression ratio +* Reduced memory usage can improve speed, due to cache effect +* Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ +#define FSE_MAX_MEMORY_USAGE 14 +#define FSE_DEFAULT_MEMORY_USAGE 13 + +/* FSE_MAX_SYMBOL_VALUE : +* Maximum symbol value authorized. 
+* Required for proper stack allocation */ +#define FSE_MAX_SYMBOL_VALUE 255 + + +/**************************************************************** +* template functions type & suffix +****************************************************************/ +#define FSE_FUNCTION_TYPE BYTE +#define FSE_FUNCTION_EXTENSION + + +/**************************************************************** +* Byte symbol type +****************************************************************/ +#endif /* !FSE_COMMONDEFS_ONLY */ + + +/**************************************************************** +* Compiler specifics +****************************************************************/ +#ifdef _MSC_VER /* Visual Studio */ +# define FORCE_INLINE static __forceinline +# include /* For Visual 2005 */ +# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ +# pragma warning(disable : 4214) /* disable: C4214: non-int bitfields */ +#else +# if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */ +# ifdef __GNUC__ +# define FORCE_INLINE static inline __attribute__((always_inline)) +# else +# define FORCE_INLINE static inline +# endif +# else +# define FORCE_INLINE static +# endif /* __STDC_VERSION__ */ +#endif + + +/**************************************************************** +* Includes +****************************************************************/ +#include /* malloc, free, qsort */ +#include /* memcpy, memset */ +#include /* printf (debug) */ + +/**************************************************************** +* Constants +*****************************************************************/ +#define FSE_MAX_TABLELOG (FSE_MAX_MEMORY_USAGE-2) +#define FSE_MAX_TABLESIZE (1U< FSE_TABLELOG_ABSOLUTE_MAX +#error "FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX is not supported" +#endif + + +/**************************************************************** +* Error Management 
+****************************************************************/ +#define FSE_STATIC_ASSERT(c) { enum { FSE_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */ + + +/**************************************************************** +* Complex types +****************************************************************/ +typedef U32 DTable_max_t[FSE_DTABLE_SIZE_U32(FSE_MAX_TABLELOG)]; + + +/**************************************************************** +* Templates +****************************************************************/ +/* + designed to be included + for type-specific functions (template emulation in C) + Objective is to write these functions only once, for improved maintenance +*/ + +/* safety checks */ +#ifndef FSE_FUNCTION_EXTENSION +# error "FSE_FUNCTION_EXTENSION must be defined" +#endif +#ifndef FSE_FUNCTION_TYPE +# error "FSE_FUNCTION_TYPE must be defined" +#endif + +/* Function names */ +#define FSE_CAT(X,Y) X##Y +#define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y) +#define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y) + + +/* Function templates */ + +#define FSE_DECODE_TYPE FSE_decode_t + +static U32 FSE_tableStep(U32 tableSize) { return (tableSize>>1) + (tableSize>>3) + 3; } + +static size_t FSE_buildDTable +(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog) +{ + void* ptr = dt+1; + FSE_DECODE_TYPE* const tableDecode = (FSE_DECODE_TYPE*)ptr; + FSE_DTableHeader DTableH; + const U32 tableSize = 1 << tableLog; + const U32 tableMask = tableSize-1; + const U32 step = FSE_tableStep(tableSize); + U16 symbolNext[FSE_MAX_SYMBOL_VALUE+1]; + U32 position = 0; + U32 highThreshold = tableSize-1; + const S16 largeLimit= (S16)(1 << (tableLog-1)); + U32 noLarge = 1; + U32 s; + + /* Sanity Checks */ + if (maxSymbolValue > FSE_MAX_SYMBOL_VALUE) return ERROR(maxSymbolValue_tooLarge); + if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge); + + /* Init, lay down lowprob symbols */ + DTableH.tableLog = 
(U16)tableLog; + for (s=0; s<=maxSymbolValue; s++) + { + if (normalizedCounter[s]==-1) + { + tableDecode[highThreshold--].symbol = (FSE_FUNCTION_TYPE)s; + symbolNext[s] = 1; + } + else + { + if (normalizedCounter[s] >= largeLimit) noLarge=0; + symbolNext[s] = normalizedCounter[s]; + } + } + + /* Spread symbols */ + for (s=0; s<=maxSymbolValue; s++) + { + int i; + for (i=0; i highThreshold) position = (position + step) & tableMask; /* lowprob area */ + } + } + + if (position!=0) return ERROR(GENERIC); /* position must reach all cells once, otherwise normalizedCounter is incorrect */ + + /* Build Decoding table */ + { + U32 i; + for (i=0; i FSE_TABLELOG_ABSOLUTE_MAX) return ERROR(tableLog_tooLarge); + bitStream >>= 4; + bitCount = 4; + *tableLogPtr = nbBits; + remaining = (1<1) && (charnum<=*maxSVPtr)) + { + if (previous0) + { + unsigned n0 = charnum; + while ((bitStream & 0xFFFF) == 0xFFFF) + { + n0+=24; + if (ip < iend-5) + { + ip+=2; + bitStream = MEM_readLE32(ip) >> bitCount; + } + else + { + bitStream >>= 16; + bitCount+=16; + } + } + while ((bitStream & 3) == 3) + { + n0+=3; + bitStream>>=2; + bitCount+=2; + } + n0 += bitStream & 3; + bitCount += 2; + if (n0 > *maxSVPtr) return ERROR(maxSymbolValue_tooSmall); + while (charnum < n0) normalizedCounter[charnum++] = 0; + if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) + { + ip += bitCount>>3; + bitCount &= 7; + bitStream = MEM_readLE32(ip) >> bitCount; + } + else + bitStream >>= 2; + } + { + const short max = (short)((2*threshold-1)-remaining); + short count; + + if ((bitStream & (threshold-1)) < (U32)max) + { + count = (short)(bitStream & (threshold-1)); + bitCount += nbBits-1; + } + else + { + count = (short)(bitStream & (2*threshold-1)); + if (count >= threshold) count -= max; + bitCount += nbBits; + } + + count--; /* extra accuracy */ + remaining -= FSE_abs(count); + normalizedCounter[charnum++] = count; + previous0 = !count; + while (remaining < threshold) + { + nbBits--; + threshold >>= 1; + } + + { + 
if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) + { + ip += bitCount>>3; + bitCount &= 7; + } + else + { + bitCount -= (int)(8 * (iend - 4 - ip)); + ip = iend - 4; + } + bitStream = MEM_readLE32(ip) >> (bitCount & 31); + } + } + } + if (remaining != 1) return ERROR(GENERIC); + *maxSVPtr = charnum-1; + + ip += (bitCount+7)>>3; + if ((size_t)(ip-istart) > hbSize) return ERROR(srcSize_wrong); + return ip-istart; +} + + +/********************************************************* +* Decompression (Byte symbols) +*********************************************************/ +static size_t FSE_buildDTable_rle (FSE_DTable* dt, BYTE symbolValue) +{ + void* ptr = dt; + FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr; + FSE_decode_t* const cell = (FSE_decode_t*)(ptr) + 1; /* because dt is unsigned */ + + DTableH->tableLog = 0; + DTableH->fastMode = 0; + + cell->newState = 0; + cell->symbol = symbolValue; + cell->nbBits = 0; + + return 0; +} + + +static size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits) +{ + void* ptr = dt; + FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr; + FSE_decode_t* const dinfo = (FSE_decode_t*)(ptr) + 1; /* because dt is unsigned */ + const unsigned tableSize = 1 << nbBits; + const unsigned tableMask = tableSize - 1; + const unsigned maxSymbolValue = tableMask; + unsigned s; + + /* Sanity checks */ + if (nbBits < 1) return ERROR(GENERIC); /* min size */ + + /* Build Decoding Table */ + DTableH->tableLog = (U16)nbBits; + DTableH->fastMode = 1; + for (s=0; s<=maxSymbolValue; s++) + { + dinfo[s].newState = 0; + dinfo[s].symbol = (BYTE)s; + dinfo[s].nbBits = (BYTE)nbBits; + } + + return 0; +} + +FORCE_INLINE size_t FSE_decompress_usingDTable_generic( + void* dst, size_t maxDstSize, + const void* cSrc, size_t cSrcSize, + const FSE_DTable* dt, const unsigned fast) +{ + BYTE* const ostart = (BYTE*) dst; + BYTE* op = ostart; + BYTE* const omax = op + maxDstSize; + BYTE* const olimit = omax-3; + + BIT_DStream_t bitD; + 
FSE_DState_t state1; + FSE_DState_t state2; + size_t errorCode; + + /* Init */ + errorCode = BIT_initDStream(&bitD, cSrc, cSrcSize); /* replaced last arg by maxCompressed Size */ + if (FSE_isError(errorCode)) return errorCode; + + FSE_initDState(&state1, &bitD, dt); + FSE_initDState(&state2, &bitD, dt); + +#define FSE_GETSYMBOL(statePtr) fast ? FSE_decodeSymbolFast(statePtr, &bitD) : FSE_decodeSymbol(statePtr, &bitD) + + /* 4 symbols per loop */ + for ( ; (BIT_reloadDStream(&bitD)==BIT_DStream_unfinished) && (op sizeof(bitD.bitContainer)*8) /* This test must be static */ + BIT_reloadDStream(&bitD); + + op[1] = FSE_GETSYMBOL(&state2); + + if (FSE_MAX_TABLELOG*4+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */ + { if (BIT_reloadDStream(&bitD) > BIT_DStream_unfinished) { op+=2; break; } } + + op[2] = FSE_GETSYMBOL(&state1); + + if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */ + BIT_reloadDStream(&bitD); + + op[3] = FSE_GETSYMBOL(&state2); + } + + /* tail */ + /* note : BIT_reloadDStream(&bitD) >= FSE_DStream_partiallyFilled; Ends at exactly BIT_DStream_completed */ + while (1) + { + if ( (BIT_reloadDStream(&bitD)>BIT_DStream_completed) || (op==omax) || (BIT_endOfDStream(&bitD) && (fast || FSE_endOfDState(&state1))) ) + break; + + *op++ = FSE_GETSYMBOL(&state1); + + if ( (BIT_reloadDStream(&bitD)>BIT_DStream_completed) || (op==omax) || (BIT_endOfDStream(&bitD) && (fast || FSE_endOfDState(&state2))) ) + break; + + *op++ = FSE_GETSYMBOL(&state2); + } + + /* end ? 
*/ + if (BIT_endOfDStream(&bitD) && FSE_endOfDState(&state1) && FSE_endOfDState(&state2)) + return op-ostart; + + if (op==omax) return ERROR(dstSize_tooSmall); /* dst buffer is full, but cSrc unfinished */ + + return ERROR(corruption_detected); +} + + +static size_t FSE_decompress_usingDTable(void* dst, size_t originalSize, + const void* cSrc, size_t cSrcSize, + const FSE_DTable* dt) +{ + FSE_DTableHeader DTableH; + memcpy(&DTableH, dt, sizeof(DTableH)); + + /* select fast mode (static) */ + if (DTableH.fastMode) return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1); + return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0); +} + + +static size_t FSE_decompress(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize) +{ + const BYTE* const istart = (const BYTE*)cSrc; + const BYTE* ip = istart; + short counting[FSE_MAX_SYMBOL_VALUE+1]; + DTable_max_t dt; /* Static analyzer seems unable to understand this table will be properly initialized later */ + unsigned tableLog; + unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE; + size_t errorCode; + + if (cSrcSize<2) return ERROR(srcSize_wrong); /* too small input size */ + + /* normal FSE decoding mode */ + errorCode = FSE_readNCount (counting, &maxSymbolValue, &tableLog, istart, cSrcSize); + if (FSE_isError(errorCode)) return errorCode; + if (errorCode >= cSrcSize) return ERROR(srcSize_wrong); /* too small input size */ + ip += errorCode; + cSrcSize -= errorCode; + + errorCode = FSE_buildDTable (dt, counting, maxSymbolValue, tableLog); + if (FSE_isError(errorCode)) return errorCode; + + /* always return, even if it is an error code */ + return FSE_decompress_usingDTable (dst, maxDstSize, ip, cSrcSize, dt); +} + + + +#endif /* FSE_COMMONDEFS_ONLY */ +/* ****************************************************************** + Huff0 : Huffman coder, part of New Generation Entropy library + Copyright (C) 2013-2015, Yann Collet. 
+ + BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ + You can contact the author at : + - FSE+Huff0 source repository : https://github.com/Cyan4973/FiniteStateEntropy + - Public forum : https://groups.google.com/forum/#!forum/lz4c +****************************************************************** */ + +/**************************************************************** +* Compiler specifics +****************************************************************/ +#if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) +/* inline is defined */ +#elif defined(_MSC_VER) +# define inline __inline +#else +# define inline /* disable inline */ +#endif + + +#ifdef _MSC_VER /* Visual Studio */ +# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ +#endif + + +/**************************************************************** +* Includes +****************************************************************/ +#include /* malloc, free, qsort */ +#include /* memcpy, memset */ +#include /* printf (debug) */ + +/**************************************************************** +* Error Management +****************************************************************/ +#define HUF_STATIC_ASSERT(c) { enum { HUF_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */ + + +/****************************************** +* Helper functions +******************************************/ +static unsigned HUF_isError(size_t code) { return ERR_isError(code); } + +#define HUF_ABSOLUTEMAX_TABLELOG 16 /* absolute limit of HUF_MAX_TABLELOG. Beyond that value, code does not work */ +#define HUF_MAX_TABLELOG 12 /* max configured tableLog (for static allocation); can be modified up to HUF_ABSOLUTEMAX_TABLELOG */ +#define HUF_DEFAULT_TABLELOG HUF_MAX_TABLELOG /* tableLog by default, when not specified */ +#define HUF_MAX_SYMBOL_VALUE 255 +#if (HUF_MAX_TABLELOG > HUF_ABSOLUTEMAX_TABLELOG) +# error "HUF_MAX_TABLELOG is too large !" 
+#endif + + + +/********************************************************* +* Huff0 : Huffman block decompression +*********************************************************/ +typedef struct { BYTE byte; BYTE nbBits; } HUF_DEltX2; /* single-symbol decoding */ + +typedef struct { U16 sequence; BYTE nbBits; BYTE length; } HUF_DEltX4; /* double-symbols decoding */ + +typedef struct { BYTE symbol; BYTE weight; } sortedSymbol_t; + +/*! HUF_readStats + Read compact Huffman tree, saved by HUF_writeCTable + @huffWeight : destination buffer + @return : size read from `src` +*/ +static size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats, + U32* nbSymbolsPtr, U32* tableLogPtr, + const void* src, size_t srcSize) +{ + U32 weightTotal; + U32 tableLog; + const BYTE* ip = (const BYTE*) src; + size_t iSize; + size_t oSize; + U32 n; + + if (!srcSize) return ERROR(srcSize_wrong); + iSize = ip[0]; + //memset(huffWeight, 0, hwSize); /* is not necessary, even though some analyzer complain ... */ + + if (iSize >= 128) /* special header */ + { + if (iSize >= (242)) /* RLE */ + { + static int l[14] = { 1, 2, 3, 4, 7, 8, 15, 16, 31, 32, 63, 64, 127, 128 }; + oSize = l[iSize-242]; + memset(huffWeight, 1, hwSize); + iSize = 0; + } + else /* Incompressible */ + { + oSize = iSize - 127; + iSize = ((oSize+1)/2); + if (iSize+1 > srcSize) return ERROR(srcSize_wrong); + if (oSize >= hwSize) return ERROR(corruption_detected); + ip += 1; + for (n=0; n> 4; + huffWeight[n+1] = ip[n/2] & 15; + } + } + } + else /* header compressed with FSE (normal case) */ + { + if (iSize+1 > srcSize) return ERROR(srcSize_wrong); + oSize = FSE_decompress(huffWeight, hwSize-1, ip+1, iSize); /* max (hwSize-1) values decoded, as last one is implied */ + if (FSE_isError(oSize)) return oSize; + } + + /* collect weight stats */ + memset(rankStats, 0, (HUF_ABSOLUTEMAX_TABLELOG + 1) * sizeof(U32)); + weightTotal = 0; + for (n=0; n= HUF_ABSOLUTEMAX_TABLELOG) return ERROR(corruption_detected); + 
rankStats[huffWeight[n]]++; + weightTotal += (1 << huffWeight[n]) >> 1; + } + if (weightTotal == 0) return ERROR(corruption_detected); + + /* get last non-null symbol weight (implied, total must be 2^n) */ + tableLog = BIT_highbit32(weightTotal) + 1; + if (tableLog > HUF_ABSOLUTEMAX_TABLELOG) return ERROR(corruption_detected); + { + U32 total = 1 << tableLog; + U32 rest = total - weightTotal; + U32 verif = 1 << BIT_highbit32(rest); + U32 lastWeight = BIT_highbit32(rest) + 1; + if (verif != rest) return ERROR(corruption_detected); /* last value must be a clean power of 2 */ + huffWeight[oSize] = (BYTE)lastWeight; + rankStats[lastWeight]++; + } + + /* check tree construction validity */ + if ((rankStats[1] < 2) || (rankStats[1] & 1)) return ERROR(corruption_detected); /* by construction : at least 2 elts of rank 1, must be even */ + + /* results */ + *nbSymbolsPtr = (U32)(oSize+1); + *tableLogPtr = tableLog; + return iSize+1; +} + + +/**************************/ +/* single-symbol decoding */ +/**************************/ + +static size_t HUF_readDTableX2 (U16* DTable, const void* src, size_t srcSize) +{ + BYTE huffWeight[HUF_MAX_SYMBOL_VALUE + 1]; + U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1]; /* large enough for values from 0 to 16 */ + U32 tableLog = 0; + const BYTE* ip = (const BYTE*) src; + size_t iSize = ip[0]; + U32 nbSymbols = 0; + U32 n; + U32 nextRankStart; + void* ptr = DTable+1; + HUF_DEltX2* const dt = (HUF_DEltX2*)ptr; + + HUF_STATIC_ASSERT(sizeof(HUF_DEltX2) == sizeof(U16)); /* if compilation fails here, assertion is false */ + //memset(huffWeight, 0, sizeof(huffWeight)); /* is not necessary, even though some analyzer complain ... 
*/ + + iSize = HUF_readStats(huffWeight, HUF_MAX_SYMBOL_VALUE + 1, rankVal, &nbSymbols, &tableLog, src, srcSize); + if (HUF_isError(iSize)) return iSize; + + /* check result */ + if (tableLog > DTable[0]) return ERROR(tableLog_tooLarge); /* DTable is too small */ + DTable[0] = (U16)tableLog; /* maybe should separate sizeof DTable, as allocated, from used size of DTable, in case of DTable re-use */ + + /* Prepare ranks */ + nextRankStart = 0; + for (n=1; n<=tableLog; n++) + { + U32 current = nextRankStart; + nextRankStart += (rankVal[n] << (n-1)); + rankVal[n] = current; + } + + /* fill DTable */ + for (n=0; n> 1; + U32 i; + HUF_DEltX2 D; + D.byte = (BYTE)n; D.nbBits = (BYTE)(tableLog + 1 - w); + for (i = rankVal[w]; i < rankVal[w] + length; i++) + dt[i] = D; + rankVal[w] += length; + } + + return iSize; +} + +static BYTE HUF_decodeSymbolX2(BIT_DStream_t* Dstream, const HUF_DEltX2* dt, const U32 dtLog) +{ + const size_t val = BIT_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */ + const BYTE c = dt[val].byte; + BIT_skipBits(Dstream, dt[val].nbBits); + return c; +} + +#define HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) \ + *ptr++ = HUF_decodeSymbolX2(DStreamPtr, dt, dtLog) + +#define HUF_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \ + if (MEM_64bits() || (HUF_MAX_TABLELOG<=12)) \ + HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) + +#define HUF_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \ + if (MEM_64bits()) \ + HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) + +static inline size_t HUF_decodeStreamX2(BYTE* p, BIT_DStream_t* const bitDPtr, BYTE* const pEnd, const HUF_DEltX2* const dt, const U32 dtLog) +{ + BYTE* const pStart = p; + + /* up to 4 symbols at a time */ + while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p <= pEnd-4)) + { + HUF_DECODE_SYMBOLX2_2(p, bitDPtr); + HUF_DECODE_SYMBOLX2_1(p, bitDPtr); + HUF_DECODE_SYMBOLX2_2(p, bitDPtr); + HUF_DECODE_SYMBOLX2_0(p, bitDPtr); + } + + /* closer to the end */ + while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p < pEnd)) 
+ HUF_DECODE_SYMBOLX2_0(p, bitDPtr); + + /* no more data to retrieve from bitstream, hence no need to reload */ + while (p < pEnd) + HUF_DECODE_SYMBOLX2_0(p, bitDPtr); + + return pEnd-pStart; +} + + +static size_t HUF_decompress4X2_usingDTable( + void* dst, size_t dstSize, + const void* cSrc, size_t cSrcSize, + const U16* DTable) +{ + if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */ + + { + const BYTE* const istart = (const BYTE*) cSrc; + BYTE* const ostart = (BYTE*) dst; + BYTE* const oend = ostart + dstSize; + + const void* ptr = DTable; + const HUF_DEltX2* const dt = ((const HUF_DEltX2*)ptr) +1; + const U32 dtLog = DTable[0]; + size_t errorCode; + + /* Init */ + BIT_DStream_t bitD1; + BIT_DStream_t bitD2; + BIT_DStream_t bitD3; + BIT_DStream_t bitD4; + const size_t length1 = MEM_readLE16(istart); + const size_t length2 = MEM_readLE16(istart+2); + const size_t length3 = MEM_readLE16(istart+4); + size_t length4; + const BYTE* const istart1 = istart + 6; /* jumpTable */ + const BYTE* const istart2 = istart1 + length1; + const BYTE* const istart3 = istart2 + length2; + const BYTE* const istart4 = istart3 + length3; + const size_t segmentSize = (dstSize+3) / 4; + BYTE* const opStart2 = ostart + segmentSize; + BYTE* const opStart3 = opStart2 + segmentSize; + BYTE* const opStart4 = opStart3 + segmentSize; + BYTE* op1 = ostart; + BYTE* op2 = opStart2; + BYTE* op3 = opStart3; + BYTE* op4 = opStart4; + U32 endSignal; + + length4 = cSrcSize - (length1 + length2 + length3 + 6); + if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */ + errorCode = BIT_initDStream(&bitD1, istart1, length1); + if (HUF_isError(errorCode)) return errorCode; + errorCode = BIT_initDStream(&bitD2, istart2, length2); + if (HUF_isError(errorCode)) return errorCode; + errorCode = BIT_initDStream(&bitD3, istart3, length3); + if (HUF_isError(errorCode)) return errorCode; + errorCode = BIT_initDStream(&bitD4, istart4, 
length4); + if (HUF_isError(errorCode)) return errorCode; + + /* 16-32 symbols per loop (4-8 symbols per stream) */ + endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4); + for ( ; (endSignal==BIT_DStream_unfinished) && (op4<(oend-7)) ; ) + { + HUF_DECODE_SYMBOLX2_2(op1, &bitD1); + HUF_DECODE_SYMBOLX2_2(op2, &bitD2); + HUF_DECODE_SYMBOLX2_2(op3, &bitD3); + HUF_DECODE_SYMBOLX2_2(op4, &bitD4); + HUF_DECODE_SYMBOLX2_1(op1, &bitD1); + HUF_DECODE_SYMBOLX2_1(op2, &bitD2); + HUF_DECODE_SYMBOLX2_1(op3, &bitD3); + HUF_DECODE_SYMBOLX2_1(op4, &bitD4); + HUF_DECODE_SYMBOLX2_2(op1, &bitD1); + HUF_DECODE_SYMBOLX2_2(op2, &bitD2); + HUF_DECODE_SYMBOLX2_2(op3, &bitD3); + HUF_DECODE_SYMBOLX2_2(op4, &bitD4); + HUF_DECODE_SYMBOLX2_0(op1, &bitD1); + HUF_DECODE_SYMBOLX2_0(op2, &bitD2); + HUF_DECODE_SYMBOLX2_0(op3, &bitD3); + HUF_DECODE_SYMBOLX2_0(op4, &bitD4); + + endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4); + } + + /* check corruption */ + if (op1 > opStart2) return ERROR(corruption_detected); + if (op2 > opStart3) return ERROR(corruption_detected); + if (op3 > opStart4) return ERROR(corruption_detected); + /* note : op4 supposed already verified within main loop */ + + /* finish bitStreams one by one */ + HUF_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog); + HUF_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog); + HUF_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog); + HUF_decodeStreamX2(op4, &bitD4, oend, dt, dtLog); + + /* check */ + endSignal = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4); + if (!endSignal) return ERROR(corruption_detected); + + /* decoded size */ + return dstSize; + } +} + + +static size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) +{ + HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_MAX_TABLELOG); + const BYTE* ip = (const 
BYTE*) cSrc; + size_t errorCode; + + errorCode = HUF_readDTableX2 (DTable, cSrc, cSrcSize); + if (HUF_isError(errorCode)) return errorCode; + if (errorCode >= cSrcSize) return ERROR(srcSize_wrong); + ip += errorCode; + cSrcSize -= errorCode; + + return HUF_decompress4X2_usingDTable (dst, dstSize, ip, cSrcSize, DTable); +} + + +/***************************/ +/* double-symbols decoding */ +/***************************/ + +static void HUF_fillDTableX4Level2(HUF_DEltX4* DTable, U32 sizeLog, const U32 consumed, + const U32* rankValOrigin, const int minWeight, + const sortedSymbol_t* sortedSymbols, const U32 sortedListSize, + U32 nbBitsBaseline, U16 baseSeq) +{ + HUF_DEltX4 DElt; + U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1]; + U32 s; + + /* get pre-calculated rankVal */ + memcpy(rankVal, rankValOrigin, sizeof(rankVal)); + + /* fill skipped values */ + if (minWeight>1) + { + U32 i, skipSize = rankVal[minWeight]; + MEM_writeLE16(&(DElt.sequence), baseSeq); + DElt.nbBits = (BYTE)(consumed); + DElt.length = 1; + for (i = 0; i < skipSize; i++) + DTable[i] = DElt; + } + + /* fill DTable */ + for (s=0; s= 1 */ + + rankVal[weight] += length; + } +} + +typedef U32 rankVal_t[HUF_ABSOLUTEMAX_TABLELOG][HUF_ABSOLUTEMAX_TABLELOG + 1]; + +static void HUF_fillDTableX4(HUF_DEltX4* DTable, const U32 targetLog, + const sortedSymbol_t* sortedList, const U32 sortedListSize, + const U32* rankStart, rankVal_t rankValOrigin, const U32 maxWeight, + const U32 nbBitsBaseline) +{ + U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1]; + const int scaleLog = nbBitsBaseline - targetLog; /* note : targetLog >= srcLog, hence scaleLog <= 1 */ + const U32 minBits = nbBitsBaseline - maxWeight; + U32 s; + + memcpy(rankVal, rankValOrigin, sizeof(rankVal)); + + /* fill DTable */ + for (s=0; s= minBits) /* enough room for a second symbol */ + { + U32 sortedRank; + int minWeight = nbBits + scaleLog; + if (minWeight < 1) minWeight = 1; + sortedRank = rankStart[minWeight]; + HUF_fillDTableX4Level2(DTable+start, 
targetLog-nbBits, nbBits, + rankValOrigin[nbBits], minWeight, + sortedList+sortedRank, sortedListSize-sortedRank, + nbBitsBaseline, symbol); + } + else + { + U32 i; + const U32 end = start + length; + HUF_DEltX4 DElt; + + MEM_writeLE16(&(DElt.sequence), symbol); + DElt.nbBits = (BYTE)(nbBits); + DElt.length = 1; + for (i = start; i < end; i++) + DTable[i] = DElt; + } + rankVal[weight] += length; + } +} + +static size_t HUF_readDTableX4 (U32* DTable, const void* src, size_t srcSize) +{ + BYTE weightList[HUF_MAX_SYMBOL_VALUE + 1]; + sortedSymbol_t sortedSymbol[HUF_MAX_SYMBOL_VALUE + 1]; + U32 rankStats[HUF_ABSOLUTEMAX_TABLELOG + 1] = { 0 }; + U32 rankStart0[HUF_ABSOLUTEMAX_TABLELOG + 2] = { 0 }; + U32* const rankStart = rankStart0+1; + rankVal_t rankVal; + U32 tableLog, maxW, sizeOfSort, nbSymbols; + const U32 memLog = DTable[0]; + const BYTE* ip = (const BYTE*) src; + size_t iSize = ip[0]; + void* ptr = DTable; + HUF_DEltX4* const dt = ((HUF_DEltX4*)ptr) + 1; + + HUF_STATIC_ASSERT(sizeof(HUF_DEltX4) == sizeof(U32)); /* if compilation fails here, assertion is false */ + if (memLog > HUF_ABSOLUTEMAX_TABLELOG) return ERROR(tableLog_tooLarge); + //memset(weightList, 0, sizeof(weightList)); /* is not necessary, even though some analyzer complain ... 
*/ + + iSize = HUF_readStats(weightList, HUF_MAX_SYMBOL_VALUE + 1, rankStats, &nbSymbols, &tableLog, src, srcSize); + if (HUF_isError(iSize)) return iSize; + + /* check result */ + if (tableLog > memLog) return ERROR(tableLog_tooLarge); /* DTable can't fit code depth */ + + /* find maxWeight */ + for (maxW = tableLog; rankStats[maxW]==0; maxW--) + {if (!maxW) return ERROR(GENERIC); } /* necessarily finds a solution before maxW==0 */ + + /* Get start index of each weight */ + { + U32 w, nextRankStart = 0; + for (w=1; w<=maxW; w++) + { + U32 current = nextRankStart; + nextRankStart += rankStats[w]; + rankStart[w] = current; + } + rankStart[0] = nextRankStart; /* put all 0w symbols at the end of sorted list*/ + sizeOfSort = nextRankStart; + } + + /* sort symbols by weight */ + { + U32 s; + for (s=0; s> consumed; + } + } + } + + HUF_fillDTableX4(dt, memLog, + sortedSymbol, sizeOfSort, + rankStart0, rankVal, maxW, + tableLog+1); + + return iSize; +} + + +static U32 HUF_decodeSymbolX4(void* op, BIT_DStream_t* DStream, const HUF_DEltX4* dt, const U32 dtLog) +{ + const size_t val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */ + memcpy(op, dt+val, 2); + BIT_skipBits(DStream, dt[val].nbBits); + return dt[val].length; +} + +static U32 HUF_decodeLastSymbolX4(void* op, BIT_DStream_t* DStream, const HUF_DEltX4* dt, const U32 dtLog) +{ + const size_t val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */ + memcpy(op, dt+val, 1); + if (dt[val].length==1) BIT_skipBits(DStream, dt[val].nbBits); + else + { + if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8)) + { + BIT_skipBits(DStream, dt[val].nbBits); + if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8)) + DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8); /* ugly hack; works only because it's the last symbol. 
Note : can't easily extract nbBits from just this symbol */ + } + } + return 1; +} + + +#define HUF_DECODE_SYMBOLX4_0(ptr, DStreamPtr) \ + ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog) + +#define HUF_DECODE_SYMBOLX4_1(ptr, DStreamPtr) \ + if (MEM_64bits() || (HUF_MAX_TABLELOG<=12)) \ + ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog) + +#define HUF_DECODE_SYMBOLX4_2(ptr, DStreamPtr) \ + if (MEM_64bits()) \ + ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog) + +static inline size_t HUF_decodeStreamX4(BYTE* p, BIT_DStream_t* bitDPtr, BYTE* const pEnd, const HUF_DEltX4* const dt, const U32 dtLog) +{ + BYTE* const pStart = p; + + /* up to 8 symbols at a time */ + while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p < pEnd-7)) + { + HUF_DECODE_SYMBOLX4_2(p, bitDPtr); + HUF_DECODE_SYMBOLX4_1(p, bitDPtr); + HUF_DECODE_SYMBOLX4_2(p, bitDPtr); + HUF_DECODE_SYMBOLX4_0(p, bitDPtr); + } + + /* closer to the end */ + while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p <= pEnd-2)) + HUF_DECODE_SYMBOLX4_0(p, bitDPtr); + + while (p <= pEnd-2) + HUF_DECODE_SYMBOLX4_0(p, bitDPtr); /* no need to reload : reached the end of DStream */ + + if (p < pEnd) + p += HUF_decodeLastSymbolX4(p, bitDPtr, dt, dtLog); + + return p-pStart; +} + + + +static size_t HUF_decompress4X4_usingDTable( + void* dst, size_t dstSize, + const void* cSrc, size_t cSrcSize, + const U32* DTable) +{ + if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */ + + { + const BYTE* const istart = (const BYTE*) cSrc; + BYTE* const ostart = (BYTE*) dst; + BYTE* const oend = ostart + dstSize; + + const void* ptr = DTable; + const HUF_DEltX4* const dt = ((const HUF_DEltX4*)ptr) +1; + const U32 dtLog = DTable[0]; + size_t errorCode; + + /* Init */ + BIT_DStream_t bitD1; + BIT_DStream_t bitD2; + BIT_DStream_t bitD3; + BIT_DStream_t bitD4; + const size_t length1 = MEM_readLE16(istart); + const size_t length2 = 
MEM_readLE16(istart+2); + const size_t length3 = MEM_readLE16(istart+4); + size_t length4; + const BYTE* const istart1 = istart + 6; /* jumpTable */ + const BYTE* const istart2 = istart1 + length1; + const BYTE* const istart3 = istart2 + length2; + const BYTE* const istart4 = istart3 + length3; + const size_t segmentSize = (dstSize+3) / 4; + BYTE* const opStart2 = ostart + segmentSize; + BYTE* const opStart3 = opStart2 + segmentSize; + BYTE* const opStart4 = opStart3 + segmentSize; + BYTE* op1 = ostart; + BYTE* op2 = opStart2; + BYTE* op3 = opStart3; + BYTE* op4 = opStart4; + U32 endSignal; + + length4 = cSrcSize - (length1 + length2 + length3 + 6); + if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */ + errorCode = BIT_initDStream(&bitD1, istart1, length1); + if (HUF_isError(errorCode)) return errorCode; + errorCode = BIT_initDStream(&bitD2, istart2, length2); + if (HUF_isError(errorCode)) return errorCode; + errorCode = BIT_initDStream(&bitD3, istart3, length3); + if (HUF_isError(errorCode)) return errorCode; + errorCode = BIT_initDStream(&bitD4, istart4, length4); + if (HUF_isError(errorCode)) return errorCode; + + /* 16-32 symbols per loop (4-8 symbols per stream) */ + endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4); + for ( ; (endSignal==BIT_DStream_unfinished) && (op4<(oend-7)) ; ) + { + HUF_DECODE_SYMBOLX4_2(op1, &bitD1); + HUF_DECODE_SYMBOLX4_2(op2, &bitD2); + HUF_DECODE_SYMBOLX4_2(op3, &bitD3); + HUF_DECODE_SYMBOLX4_2(op4, &bitD4); + HUF_DECODE_SYMBOLX4_1(op1, &bitD1); + HUF_DECODE_SYMBOLX4_1(op2, &bitD2); + HUF_DECODE_SYMBOLX4_1(op3, &bitD3); + HUF_DECODE_SYMBOLX4_1(op4, &bitD4); + HUF_DECODE_SYMBOLX4_2(op1, &bitD1); + HUF_DECODE_SYMBOLX4_2(op2, &bitD2); + HUF_DECODE_SYMBOLX4_2(op3, &bitD3); + HUF_DECODE_SYMBOLX4_2(op4, &bitD4); + HUF_DECODE_SYMBOLX4_0(op1, &bitD1); + HUF_DECODE_SYMBOLX4_0(op2, &bitD2); + HUF_DECODE_SYMBOLX4_0(op3, &bitD3); + 
HUF_DECODE_SYMBOLX4_0(op4, &bitD4); + + endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4); + } + + /* check corruption */ + if (op1 > opStart2) return ERROR(corruption_detected); + if (op2 > opStart3) return ERROR(corruption_detected); + if (op3 > opStart4) return ERROR(corruption_detected); + /* note : op4 supposed already verified within main loop */ + + /* finish bitStreams one by one */ + HUF_decodeStreamX4(op1, &bitD1, opStart2, dt, dtLog); + HUF_decodeStreamX4(op2, &bitD2, opStart3, dt, dtLog); + HUF_decodeStreamX4(op3, &bitD3, opStart4, dt, dtLog); + HUF_decodeStreamX4(op4, &bitD4, oend, dt, dtLog); + + /* check */ + endSignal = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4); + if (!endSignal) return ERROR(corruption_detected); + + /* decoded size */ + return dstSize; + } +} + + +static size_t HUF_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) +{ + HUF_CREATE_STATIC_DTABLEX4(DTable, HUF_MAX_TABLELOG); + const BYTE* ip = (const BYTE*) cSrc; + + size_t hSize = HUF_readDTableX4 (DTable, cSrc, cSrcSize); + if (HUF_isError(hSize)) return hSize; + if (hSize >= cSrcSize) return ERROR(srcSize_wrong); + ip += hSize; + cSrcSize -= hSize; + + return HUF_decompress4X4_usingDTable (dst, dstSize, ip, cSrcSize, DTable); +} + + +/**********************************/ +/* quad-symbol decoding */ +/**********************************/ +typedef struct { BYTE nbBits; BYTE nbBytes; } HUF_DDescX6; +typedef union { BYTE byte[4]; U32 sequence; } HUF_DSeqX6; + +/* recursive, up to level 3; may benefit from